From 2b85cf2405c900c22b5298b1735c5797ad1d7b5c Mon Sep 17 00:00:00 2001 From: Hoffmann <hoffmann@mathematik.tu-darmstadt.de> Date: Mon, 4 Nov 2024 13:03:47 +0100 Subject: [PATCH] initial commit --- 1d/main.py | 196 +++ 1d/myfun.py | 315 +++++ 2d/__pycache__/myfun.cpython-312.pyc | Bin 0 -> 82170 bytes 2d/compute_errorestimates.py | 252 ++++ 2d/exact_error.py | 201 +++ 2d/main_morley.py | 304 +++++ 2d/myfun.py | 1837 ++++++++++++++++++++++++++ 2d/plot_aposti_estimator.py | 94 ++ 2d/plot_bubble.py | 56 + 2d/plot_meshes.py | 47 + 2d/plot_num-sol.py | 146 ++ 2d/scaling_space_errorestimates.py | 301 +++++ 2d/scaling_time_errorestimates.py | 301 +++++ 13 files changed, 4050 insertions(+) create mode 100644 1d/main.py create mode 100644 1d/myfun.py create mode 100644 2d/__pycache__/myfun.cpython-312.pyc create mode 100644 2d/compute_errorestimates.py create mode 100644 2d/exact_error.py create mode 100644 2d/main_morley.py create mode 100644 2d/myfun.py create mode 100644 2d/plot_aposti_estimator.py create mode 100644 2d/plot_bubble.py create mode 100644 2d/plot_meshes.py create mode 100644 2d/plot_num-sol.py create mode 100644 2d/scaling_space_errorestimates.py create mode 100644 2d/scaling_time_errorestimates.py diff --git a/1d/main.py b/1d/main.py new file mode 100644 index 0000000..5644f25 --- /dev/null +++ b/1d/main.py @@ -0,0 +1,196 @@ +import myfun as my +import numpy as np +from matplotlib import pyplot as plt + +# CHOOSE METHOD +method = 'wasserstein' #'wasserstein' or 'upwinding' + +T = 30*10**(-3) # time interval [0,T] + +Nx = 103 # choose fineness of spatial discretization +Nt = 200 # choose fineness of temporal discretization +hx = 1/(Nx+2) # assumption equidistant mesh, spatial mesh size +ht = (T)/(Nt+1) # assumption equidistant steps, temporal mesh size + +print('hx',hx) +print('ht',ht) + +faces = np.linspace(0,1,Nx+3) # Cell interfaces x_(i+1/2) for i=0,1,...,N and boundary faces x_-1/2 = 0, x_N+1/2 = 1. +# +3 as we have N+1 interfaces and 2 boundary faces +faces = np.array(faces) # [-1, 0,1,2,...,N ,N+1] -> length = Nx+3 + +# compute array of midpoints +midpoints = [] +for i in range(Nx+2) : + midpoints.append((faces[i]+faces[i+1])/2) # cell midpoints +midpoints = np.array(midpoints) # -> length = Nx+2 + +# pre-allocate +rho = np.zeros([Nt,Nx+2]) +cc = np.zeros([Nt,Nx+2]) + +# CHOOSE INITIAL CONDITION +rho_0 = lambda x: 1.3*np.sin(np.pi*x)*np.exp(-25*(x-1/2)**2) # pre-factors scales measure to sum up to 1 over midpoints +# rho_0 = lambda x: (np.sin(np.pi*x)*np.exp(-50*(x-1/4)**2) + np.sin(np.pi*x)*np.exp(-50*(x-3/4)**2)) # double gaussian + +# discrete initial condition via evalutation at midpoints, i.e. 
projection to discrete space +rho_h0 = rho_0(midpoints) # initial value t = 0, N+3 faces result in N+2 elements, need to sum to 1 (represents measure), may not start with uniform distribution + +# save initial discrete rho0 +rho[0][:] = rho_h0 # t=0 + +# initialize object +discr_E = [] + + +if method == 'upwinding' : + + for n in range(Nt-1) : # go through time intervals + + # GET C FROM RHO + c = my.getc(rho[n,:],midpoints,faces,hx) # paramaters to get function c_h^n(x) via c_h^n(x) = sum_i (c_i*hat_i(x)) + cc[n,:] = c + + # get discrete energy + discr_E.append(my.discrete_energy(c,rho[n,:],hx)) + rhs = [] + + # APPLY NUMERICAL SCHEME + for i in range(Nx+2) : # assumption gamma = 1, len(rho) = Nx+2, going over midpoints + #print(i) + if i == Nx+2-1 : # periodic boundary data + dxch_irechts = c[i]*my.dxhat(i,midpoints,hx,faces[0])+c[0]*my.dxhat(0,midpoints,hx,faces[0]) + dxch_ilinks = c[i-1]*my.dxhat(i-1,midpoints,hx,faces[i])+c[i]*my.dxhat(i,midpoints,hx,faces[i]) #x_{i-1/2} + + if dxch_irechts >= 0 : + if dxch_ilinks >= 0 : + rhs.append(rho[n][i] - ht/hx*(dxch_irechts*rho[n][i]-dxch_ilinks*rho[n][i-1])) + else : + rhs.append(rho[n][i] - ht/hx*(dxch_irechts*rho[n][i]-dxch_ilinks*rho[n][i])) + else : + if dxch_ilinks >= 0: + rhs.append(rho[n][i] - ht/hx*(dxch_irechts*rho[n][0]-dxch_ilinks*rho[n][i-1])) + else : + rhs.append(rho[n][i] - ht/hx*(dxch_irechts*rho[n][0]-dxch_ilinks*rho[n][i])) + + else : # interior faces + dxch_irechts = c[i]*my.dxhat(i,midpoints,hx,faces[i+1])+c[i+1]*my.dxhat(i+1,midpoints,hx,faces[i+1]) + dxch_ilinks = c[i-1]*my.dxhat(i-1,midpoints,hx,faces[i])+c[i]*my.dxhat(i,midpoints,hx,faces[i]) + + if dxch_irechts >= 0 : + if dxch_ilinks >= 0: + rhs.append(rho[n][i] - ht/hx*(dxch_irechts*rho[n][i]-dxch_ilinks*rho[n][i-1])) + else : + rhs.append(rho[n][i] - ht/hx*(dxch_irechts*rho[n][i]-dxch_ilinks*rho[n][i])) + else : + if dxch_ilinks >= 0: + rhs.append(rho[n][i] - ht/hx*(dxch_irechts*rho[n][i+1]-dxch_ilinks*rho[n][i-1])) + else : + rhs.append(rho[n][i] - ht/hx*(dxch_irechts*rho[n][i+1]-dxch_ilinks*rho[n][i])) + + rho[n+1][:] = my.update_in_time(rhs,Nx,ht,hx) + + discr_E = np.array(discr_E) + + +elif method == 'wasserstein' : + + for n in range(Nt-1) : # go through time intervals + + # GET C FROM RHO + c = my.getc(rho[n,:],midpoints,faces,hx) # paramaters to get function c_h^n(x) via c_h^n(x) = sum_i (c_i*hat_i(x)) + cc[n,:] = c + + # get discrete energy + discr_E.append(my.discrete_energy(c,rho[n,:],hx)) + rhs = [] + + # APPLY NUMERICAL SCHEME + for i in range(Nx+2) : # assumption gamma = 1, len(rho) = Nx+2 + if i == Nx+2-1 : # periodic boundary data + dxch_iplus = c[i]*my.dxhat(i,midpoints,hx,faces[i+1])+c[0]*my.dxhat(0,midpoints,hx,faces[i+1]) + dxch_iminus = c[i-1]*my.dxhat(i-1,midpoints,hx,faces[i])+c[i]*my.dxhat(i,midpoints,hx,faces[i]) + if np.abs(rho[n][i]-rho[n][i-1]) < 10**-6 or rho[n][i] < 10**-6 or rho[n][i-1] < 10**-6 : + if np.abs(rho[n][0]-rho[n][i]) < 10**-6 or rho[n][i] < 10**-6 or rho[n][0] < 10**-6 : + rhs.append(rho[n][i] - ht/hx*(dxch_iplus*1/2*(rho[n][0]+rho[n][i])-dxch_iminus*1/2*(rho[n][i]+rho[n][i-1]))) + else : + rhs.append(rho[n][i] - ht/hx*(dxch_iplus*(rho[n][0]-rho[n][i])/(np.log(rho[n][0])-np.log(rho[n][i]))-dxch_iminus*1/2*(rho[n][i]+rho[n][i-1]))) + else : + if np.abs(rho[n][0]-rho[n][i]) < 10**-6 or rho[n][i] < 10**-6 or rho[n][0] < 10**-6 : + rhs.append(rho[n][i] - ht/hx*1/2*(dxch_iplus*(rho[n][0]+rho[n][i])-dxch_iminus*(rho[n][i]-rho[n][i-1])/(np.log(rho[n][i])-np.log(rho[n][i-1])))) + else : + rhs.append(rho[n][i] - 
ht/hx*(dxch_iplus*(rho[n][0]-rho[n][i])/(np.log(rho[n][0])-np.log(rho[n][i]))-dxch_iminus*(rho[n][i]-rho[n][i-1])/(np.log(rho[n][i])-np.log(rho[n][i-1])))) + + else : # interior faces + dxch_iplus = c[i]*my.dxhat(i,midpoints,hx,faces[i+1])+c[i+1]*my.dxhat(i+1,midpoints,hx,faces[i+1]) + dxch_iminus = c[i-1]*my.dxhat(i-1,midpoints,hx,faces[i])+c[i]*my.dxhat(i,midpoints,hx,faces[i]) + if np.abs(rho[n][i]-rho[n][i-1]) < 10**-6 or rho[n][i] < 10**-6 or rho[n][i-1] < 10**-6 : + if np.abs(rho[n][i+1]-rho[n][i]) < 10**-6 or rho[n][i] < 10**-6 or rho[n][i+1] < 10**-6 : + rhs.append(rho[n][i] - ht/hx*(dxch_iplus*1/2*(rho[n][i+1]+rho[n][i])-dxch_iminus*1/2*(rho[n][i]+rho[n][i-1]))) + else : + rhs.append(rho[n][i] - ht/hx*(dxch_iplus*(rho[n][i+1]-rho[n][i])/(np.log(rho[n][i+1])-np.log(rho[n][i]))-dxch_iminus*1/2*(rho[n][i]+rho[n][i-1]))) + else : + if np.abs(rho[n][i+1]-rho[n][i]) < 10**-6 or rho[n][i] < 10**-6 or rho[n][i+1] < 10**-6 : + rhs.append(rho[n][i] - ht/hx*1/2*(dxch_iplus*(rho[n][i+1]+rho[n][i])-dxch_iminus*(rho[n][i]-rho[n][i-1])/(np.log(rho[n][i])-np.log(rho[n][i-1])))) + else : + rhs.append(rho[n][i] - ht/hx*(dxch_iplus*(rho[n][i+1]-rho[n][i])/(np.log(rho[n][i+1])-np.log(rho[n][i]))-dxch_iminus*(rho[n][i]-rho[n][i-1])/(np.log(rho[n][i])-np.log(rho[n][i-1])))) + + rho[n+1][:] = my.update_in_time(rhs,Nx,ht,hx) + + # PLOT FUNCTION PROFILES AS SNAPSHOTS AT TIME STEP n + # does not work for all Nx + if n == 42: + c_h = lambda x : my.cfunc(cc[n][:],midpoints,hx,x) + lin_rho = lambda x : my.linear_interpol(rho[n][:],midpoints,hx,x) + const_rho = lambda x: my.const_rho(rho[n][:],midpoints,hx,x) + + #compute function values + xi = np.linspace(0,1,300) + val1 = [] + val2 = [] + val3 = [] + for x in xi: + val1.append(const_rho(x)) + val2.append(lin_rho(x)) + val3.append(c_h(x)) + + plt.plot(xi,val1,'steelblue',xi,val2,'deepskyblue') + plt.plot(midpoints,np.zeros(len(midpoints)),'r.') + plt.xlabel('x') + plt.ylabel('bacterial density') + plt.legend(['finite volume sol.','lin. 
interpolation','midpoints x_i']) + time = n*ht + title = 'bacterial density at time '+'%f' % time + plt.title(title) + plt.show() + + plt.plot(xi,val3,'green') + plt.plot(midpoints,np.zeros(len(midpoints)),'r.') + plt.xlabel('x') + plt.ylabel('chemical concentration') + title = 'chemical concentration at time '+'%f' % time + plt.title(title) + plt.legend(['finite element sol.','midpoints x_i']) + plt.show() + + + # save discrete energy for this time step + discr_E = np.array(discr_E) + +else : + print('Error: Wrong string input') + + +# HEATMAP PLOT OF EVOLUTION +plt.pcolormesh(rho,cmap = 'jet') +plt.xlabel('space') +plt.ylabel('time') +plt.colorbar() +plt.show() + +# PLOT DISCRETE ENERGY +plt.plot(np.linspace(0,T,Nt-1),discr_E) +plt.xlabel('time') +plt.ylabel('discrete Energy') +plt.title('Discrete energy dissipation') +plt.show() diff --git a/1d/myfun.py b/1d/myfun.py new file mode 100644 index 0000000..ed371df --- /dev/null +++ b/1d/myfun.py @@ -0,0 +1,315 @@ +import numpy as np +from scipy.sparse import csr_matrix +from scipy.sparse.linalg import spsolve + + +# FINITE ELEMENT + +def linear_interpol(rho,midpoints,h,x) : + # Input : current approximation rho^n, midpoints, mesh size h, point x + # output : value of linear interpolation at x + + # find element in which x lies use midpoint[i] = h(i+1/2) + + i0 = int(np.floor((x/h-1/2))) + i1 = int(np.ceil((x/h-1/2))) + + if i0 == -1 : # periodic boundary condition + y1 = rho[i1] + x1 = midpoints[i1] + y0 = 1/2*(rho[0]+rho[-1]) + x0 = 0 + + #get linear interpolation + m = (y1-y0)/(x1-x0) # /h + b = y1-m*x1 + + y = m*x+b + + elif i1 == len(midpoints) : # periodic boundary condition + y1 = 1/2*(rho[0]+rho[-1]) + x1 = 1 + y0 = rho[i0] + x0 = midpoints[i0] + + #get linear interpolation + m = (y1-y0)/(x1-x0) # /h + b = y1-m*x1 + + y = m*x+b + + else : + + if i0 == i1 : # falls rundungsfehler + i0 -= 1 + + y1 = rho[i1] + x1 = midpoints[i1] + y0 = rho[i0] + x0 = midpoints[i0] + + #get linear interpolation + m = (y1-y0)/(x1-x0) # /h + b = y1-m*x1 + + y = m*x+b + + return y + + +def hat(i,midpoints,h,x) : + + values = np.zeros(len(midpoints)) + values[i] = 1 + + y = linear_interpol(values,midpoints,h,x) + + return y + + +def dxhat(i,midpoints,h,x) : + + values = np.zeros(len(midpoints)) + values[i] = 1 + + # find element in which x lies use midpoint[i] = h(i+1/2) + i0 = int(np.floor((x/h-1/2))) + i1 = int(np.ceil((x/h-1/2))) + + if i0 == -1: # periodic boundary condition + y1 = values[i1] + x1 = midpoints[i1] + y0 = 1/2*(values[0]+values[-1]) + x0 = 0 + + #get linear interpolation + m = (y1-y0)/(x1-x0) # /h + + + elif i1 == len(midpoints) : # periodic boundary condition + y1 = 1/2*(values[0]+values[-1]) + x1 = 1 + y0 = values[i0] + x0 = midpoints[i0] + + #get linear interpolation + m = (y1-y0)/(x1-x0) # /h + + else : + + y1 = values[i1] + x1 = midpoints[i1] + y0 = values[i0] + x0 = midpoints[i0] + + #get linear interpolation + m = (y1-y0)/(x1-x0) # /h + + + return m + + +def quadrature(I,g) : + # inupt: interval I = [a,b] + + areaI = I[1]-I[0] + + trafo = lambda y : y*(I[1]-I[0])/2+(I[1]+I[0])/2 # y in [-1,1] + + xi = np.array([-np.sqrt(1/3),np.sqrt(1/3)]) #on [-1,1] + #xi = [-1,1] + wi = [1,1] + + # xi = [0] + # wi = [2] + + # xi = np.array([-np.sqrt(3/5),0,np.sqrt(3/5)]) #on [-1,1] + # wi = [5/9,8/9,5/9] + + val = 0 + for k in range(len(wi)) : + val += wi[k]*g(trafo(xi[k])) # get integral + + avrg = val/2 # get average + integral = avrg*areaI + + return integral + +def midpoint_rule(I,g) : + # inupt: interval I = [a,b] + + areaI = I[1]-I[0] + + 
trafo = lambda y : y*(I[1]-I[0])/2+(I[1]+I[0])/2 # y in [-1,1] + + xi = [0] + wi = [2] + + val = 0 + for k in range(len(wi)) : + val += wi[k]*g(trafo(xi[k])) # get integral + + avrg = val/2 # get average + integral = avrg*areaI + + return integral + +def trapezoidal_rule(I,g) : + # inupt: interval I = [a,b] + + areaI = I[1]-I[0] + + trafo = lambda y : y*(I[1]-I[0])/2+(I[1]+I[0])/2 # y in [-1,1] + + xi = [-1,1] + wi = [1,1] + + val = 0 + for k in range(len(wi)) : + val += wi[k]*g(trafo(xi[k])) # get integral + + avrg = val/2 # get average + integral = avrg*areaI + + return integral + + +def getc(rho,midpoints,faces,h) : + + + lin_rho = lambda x : linear_interpol(rho,midpoints,h,x) + + row = [] + col = [] + data1 = [] + data2 = [] + b = [] + + for i in range(len(midpoints)) : + + f = lambda x: hat(i,midpoints,h,x)*lin_rho(x) + + # if i == (len(midpoints)-1): + # rhs = quadrature([midpoints[i-1],1],f) + quadrature([0,midpoints[1]],f) # periodic boundary + # elif i == 0 : + # rhs = quadrature([0,midpoints[i+1]],f) + quadrature([midpoints[-1],1],f) # periodic boundary + # else : + # rhs = quadrature([midpoints[i-1],midpoints[i+1]],f) + + rhs = midpoint_rule([faces[i],faces[i+1]],f) + + for j in range(len(midpoints)) : + if abs(i-j) < 2 : + g = lambda x: hat(i,midpoints,h,x)*hat(j,midpoints,h,x) + dg = lambda x: dxhat(i,midpoints,h,x)*dxhat(j,midpoints,h,x) + + if i == (len(midpoints)-1) : + val2 = trapezoidal_rule([midpoints[i],1],g) + trapezoidal_rule([0,midpoints[0]],g) + # val2 = midpoint_rule([midpoints[i],1],g) + midpoint_rule([0,midpoints[0]],g) + + else : + + val2 = trapezoidal_rule([midpoints[i],midpoints[i+1]],g) # for A2 + # val2 = midpoint_rule([midpoints[i],midpoints[i+1]],g) # for A1 + + val1 = trapezoidal_rule([faces[i],faces[i+1]],dg) + # val2 = trapezoidal_rule([midpoints[i],midpoints[i+1]],g) + + row.append(i) + col.append(j) + data1.append(val1) + data2.append(val2) + b.append(rhs) + + + + sparseA1 = csr_matrix((data1, (row, col)), shape = (len(midpoints), len(midpoints)))#.toarray() + sparseA2 = csr_matrix((data2, (row, col)), shape = (len(midpoints), len(midpoints)))#.toarray() + + sparseA = sparseA1 + sparseA2 + + # plt.spy(sparseA2.toarray()) + # plt.show() + + c = spsolve(sparseA, b, permc_spec=None, use_umfpack=True) # solve system of equations + # c_h^n(x) = sum_i (c_i*hat_i(x)) + return c + + +def cfunc(cc,midpoints,h,x) : + + i0 = int(np.floor((x/h-1/2))) + i1 = int(np.ceil((x/h-1/2))) + + if i1 == len(midpoints) : + i1 = 0 + + # print(ii) + val = 0 + for i in [i0,i1] : + val += cc[i]*hat(i,midpoints,h,x) + + return val + + +def const_rho(rho,midpoints,h,x) : + + i = int(np.round((x/h-1/2))) + val = rho[i] + + return val + + +def update_in_time(rhs,Nx,ht,hx) : + + row = [] + col = [] + data = [] + + row.append(0) # periodic boundary condition + col.append(Nx+1) + data.append(-ht/(hx**2)) + + for i in range(Nx+2) : + for j in range(Nx+2) : + if abs(i-j)<2: + if i == j : + val = 1+(2*ht)/(hx**2) + else : + val = -ht/(hx**2) + + row.append(i) + col.append(j) + data.append(val) + + row.append(Nx+1) # periodic boundary condition + col.append(0) + data.append(-ht/(hx**2)) + + sparseAt = csr_matrix((data, (row, col)), shape = (Nx+2, Nx+2))#.toarray() + # plt.spy(csr_matrix((data, (row, col)), shape = (Nx+2, Nx+2)).toarray()) + # plt.show() + rho_time_update = spsolve(sparseAt, rhs, permc_spec=None, use_umfpack=True) # solve system of equations + + #print(len(rho_time_update)) + return rho_time_update + + +def discrete_energy(c,rho,hx) : + + # input: discrete values rho and 
c, and spatial step size hx + # discrete energy w.r.t. rho and c + + value = 0 + + for i in range(len(rho)) : + + # MIDPOINT RULE + value += hx*(rho[i]*np.log(rho[i])-1/2*rho[i]*c[i]) # cc[i] is fine as hat(i) = 1 in midpoint[i] + + + return value + + + + diff --git a/2d/__pycache__/myfun.cpython-312.pyc b/2d/__pycache__/myfun.cpython-312.pyc new file mode 100644 index 0000000000000000000000000000000000000000..39b78f767f2996c22ecb00cf1bd39265a7a5aea7 GIT binary patch literal 82170 (binary payload of the compiled .pyc omitted)
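For reference, the 'wasserstein' branch of 1d/main.py evaluates the convective flux across a face x_{i+1/2} as dxch_iplus times the logarithmic mean of the two adjacent cell densities, falling back to the arithmetic mean whenever the values are nearly equal or close to zero; the discrete energy monitored alongside it is E_h(rho,c) = sum_i hx*(rho_i*log(rho_i) - 1/2*rho_i*c_i), as computed in discrete_energy above. Below is a minimal sketch of that face mobility, assuming a helper name log_mean and a tol parameter that do not appear in the patch (the patch inlines this logic with the hard-coded tolerance 10**-6):

    import numpy as np

    def log_mean(a, b, tol=1e-6):
        # Logarithmic mean (a - b) / (log a - log b), with an arithmetic-mean
        # fallback when the two values are nearly equal or close to zero,
        # matching the branches of the 'wasserstein' scheme in 1d/main.py.
        if abs(a - b) < tol or a < tol or b < tol:
            return 0.5 * (a + b)
        return (a - b) / (np.log(a) - np.log(b))

    # The explicit convection part of one interior step (before the implicit
    # diffusion solve in update_in_time) then reads, with periodic wrap-around
    # of the indices omitted for brevity:
    # rhs[i] = rho[i] - ht/hx * (dxch_iplus  * log_mean(rho[i+1], rho[i])
    #                            - dxch_iminus * log_mean(rho[i],   rho[i-1]))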
zx6*P$;A)$l90vq<b6?bUJ{WvL@Nn2gE$@?c7vkbC`zKsXF1S!wbUC^BqJQy4t&+>q zr5DO8E=pHGBcdphiGe{=5YZk1yA#{cXps#li`s$+I}mXY)WQJ|a)LZwR0k4M0Exjz z{EI<*JV7I!pvDNO&<C|apnJAOu$2u(^-K&55+HAaR^mX1Ix%Khxmgu4R=nJojOyTf zE>MAUeG7a^%T*4A58~XcilB4UVJlfQy>GGRmF5;yf;;}Vc#D%W3o7-B3ld8*6LX3{ zwZ$zSxL8qfY7uB~>K0dSVhL!|Ku%^-5vXnkZ$P*ujI2;ECo?ZGC%p(%=@fy+eTzUv zY!PVsq6oAQ4{{r^4=B_?_goi&Rss~2fw*lTq6<V!1QDQWz6g{(Aa|}W0f~Xzp4&kz z@Srxh*?Atsy$B*eH;zEI<b!tJ7a{iLgLmVDcE%Sug0>cTf(RcFkq9EvKtwi(04@3} znh0Xe0uggT1nAap@HNqpJ2gS~07I@J0UgQ@K3Ka5e5U~DP)Tsp0K8rnvNRL4D<87k z6tpb?y!;xx#0fmL4j$x&jJ<$H`M@J1MWBW$xG?}u7e%1?C2)NXZaDnnu*uC&Da}c> zD{2L8a{w(n1UdNwGb1D8Z3dyc3`%zyl<zXA++|Sz#3RY5{mD##QS+0UG^5ZbD^5nW zPh#ARe4k`Q83jJ+u`%*}5&#n+OpNY~pK@3<8HGPZFmf=8eiD&jH2Gx4%P99rfrXL( z6OS>Y^`|UGHb&`BLV}D!pESf7ML!uSFe-j>VH9Ol{G=zrDDufnj8Wv15y)r*0Y?5$ zDx!@1pY+5T`9GP6Fp7WDk!IxoWCbE@G#CXx#V~3y@_ve7)M6C)6ve2<DEujaQIwJQ zlK~GS|0fx45Fy1V@X1DmQTme(7bE{CDK19gPYT?O;-92A8TmhnaWL|K5=miX<oo2u ztiveqDUDH{QQ(6MV*n!~{|e*J3?TYb8j}*E&?hfOu$%c9^*$*HFv@;XQ)QI=6u_v< zDDWwRk&jXMlaeH(;3o?;M!`=(j4F(hpZpj(7!^K=aWN`<k`iT<{$!xcDE7&Rk(W{8 Lvw|q2AUHYz+Rpal literal 0 HcmV?d00001 diff --git a/2d/compute_errorestimates.py b/2d/compute_errorestimates.py new file mode 100644 index 0000000..c4e7ade --- /dev/null +++ b/2d/compute_errorestimates.py @@ -0,0 +1,252 @@ +import myfun as my +import numpy as np +import time +import pickle + +tic = time.time() + +# ---------------------------------------------------- COMPUTE A POSTERIORI ERROR ESTIMATES ----------------------------------------------------- +# This script computes the individual terms of the a posteriori error estimates \theta_K for each element K \in \mathca{T}_h + +# SETTINGS FOR SCHEME +nodal_avrg_choice = 'arithmetic-mean' +method = 'wasserstein' # using log-mean convective flux +boundary = 'Neumann' +interpol = 'morley' + +test = 'manufactured1' # 'diff', 'blow-up', 'manufactured' or 'manufactured1' +fineness = 0 # number of refinements of mesh before calculating numerical solution +Nt = 8 # number of time steps + + +# initialize different test cases +if test == 'blow-up' : +# CONVECTION DEOMINATED REGIME i.e. 
BLOW-UP + + def rho0(x) : + val = 10**(3)*np.exp(-((x[0]-0.5)**2+(x[1]-0.5)**2)/10**(-2)) + return val + + T = 0.0045 # time interval [0,T] + +elif test == 'diff' : +# DIFFUSION DOMINATED REGIME + def rho0(x) : + val = 1.3*np.exp(-25*(x[0]-1/2)**2-25*(x[1]-1/2)**2) + return val + + T = 0.05 # time interval [0,T]# + +elif test == 'manufactured' : +# MANUFACTURED SOLUTION WITH T = 0.5 + def rho0(x) : + val = 1.3*np.exp(-60*(x[0]-1/2)**2-60*(x[1]-1/2)**2) + return val + + def exact_rho(x,t) : + val = 1.3/(1+t)*np.exp(-60*(x[0]-1/2)**2-60*(x[1]-1/2)**2) + return val + + def exact_c(x) : + val = 1.3*np.exp(-60*(x[0]-1/2)**2-60*(x[1]-1/2)**2) + return val + + T = 0.5 # time interval [0,T]# + +elif test == 'manufactured1' : +# MANUFACTURED SOLUTION WITH T = 3 + + def rho0(x) : + val = 1.3*np.exp(-60*(x[0]-1/2)**2-60*(x[1]-1/2)**2) + return val + + def exact_rho(x,t) : + val = 1.3/(1+t)*np.exp(-60*(x[0]-1/2)**2-60*(x[1]-1/2)**2) + return val + + def exact_c(x) : + val = 1.3*np.exp(-60*(x[0]-1/2)**2-60*(x[1]-1/2)**2) + return val + + T = 3 # time interval [0,T]# + +else : + print('Warning: Wrong string input for test.') + + +# SCHEME/PROBLEM PARAMETERS +hx = 1/2 +ht = (T)/(Nt) + + +# initialize zerost mesh for FV approximations +[tri,points,nppoints,numtri] = my.initialmesh() +K = nppoints[tri.simplices] + +for i in range(fineness) : + hx = 1/2*hx + [tri,points,nppoints,numtri] = my.refinemesh(points, K, numtri) + K = nppoints[tri.simplices] + + +#get dual mesh for FE method to approximate chemical density +[K_dual, tri_dual, points_dual, nppoints_dual, numtri_dual] = my.getdualmesh(points, tri, fineness, K) +K_dual = np.array(K_dual) +[tri_dual,K_dual] = my.orientation_dualmesh(tri_dual,K_dual) # enforce positive orientation of all triangles in dual mesh + +# compute mesh topology for faster find_simplex routine for dual mesh +[oriented_edges,structure,edge_to_index] = my.get_edges_structure(K_dual) +my.init_indexE(numtri_dual) + +print('Compute error estimates') +print(interpol+'_'+boundary+'_'+method+'_'+test) +print('fineness',fineness) +print('numtri',numtri) +print('hx',hx) +print('ht',ht) +print('numtri_dual',numtri_dual) +print('Nt',Nt) + +max1 = 0 # need this for more efficiency calculating theta_omega +max2 = 0 # need this for more efficiency calculating theta_omega +for n in range(Nt) : # range(Nt) : + + progress = n/(Nt)*100 + print("%.2f" % progress, 'procent of progress made.') + + if n == 0: # slightly different setting at early times + + #GET ARTIFICIAL MORLEY0 + pickle_name = test+'_fineness'+str(fineness)+'_Nt'+str(Nt)+'_'+interpol+'_c at time step'+str(n)+'.p' + cc_0 = pickle.load(open('pickle_files/'+pickle_name,'rb')) + + # compute c related objects + v_0 = lambda x: my.grad_cfunc(cc_0,tri_dual,K_dual,points_dual,x,oriented_edges,structure,edge_to_index) # piecewiese constant + [lin_vx_0,lin_vy_0] = my.get_linv(K_dual,nppoints_dual,numtri_dual,tri_dual,v_0,nodal_avrg_choice) # get linear coefficients + Laplace_c_0 = lambda x : my.approx_Laplace_cn(lin_vx_0,lin_vy_0,x,oriented_edges,structure,edge_to_index) # approximation of Laplace(c)=div(v) + + v_m1 = lambda x : v_0(x) # as morley0 uses c0 instaed of c^(n-1) in n = 0 case + + # load data + pickle_name = test+'_fineness'+str(fineness)+'_Nt'+str(Nt)+'_'+interpol+'_rho at time step'+str(n+1)+'.p' + [ht_0,rho_p1,qq0_p1,betaKE_p1] = pickle.load(open('pickle_files/'+pickle_name,'rb')) + + pickle_name = test+'_fineness'+str(fineness)+'_Nt'+str(Nt)+'_'+interpol+'_rho at time step'+str(n)+'.p' + [rho_0,rho0] = 
pickle.load(open('pickle_files/'+pickle_name,'rb')) + + # get artificial morley interpolation + [qq0_0,betaKE_0] = my.get_morley0_coeff(rho_0,nppoints,numtri,tri,K) + + #dummy variables + rho_m1 = 0*rho_0 + ht_m1 = 0 + + # COMPUTE A POSTERIORI ERROR ESTIMATES + theta_early = my.get_early_time_error(K,tri,v_0,qq0_0,qq0_p1,betaKE_0,betaKE_p1) + print('extra term for early times computed.') + theta_res0 = my.get_theta_res(cc_0,Laplace_c_0,qq0_0,qq0_p1,betaKE_0,betaKE_p1,ht_0,K,tri,K_dual,tri_dual,points_dual,oriented_edges,structure,edge_to_index) + print('elementwise residual computed.') + theta_diff0 = my.get_theta_diff(qq0_0,betaKE_0,K,tri) + print('grad morley jump terms computed.') + [theta_time0,theta_time1] = my.get_theta_time(qq0_0,qq0_p1,betaKE_0,betaKE_p1,rho_m1,rho_0,rho_p1,ht_m1,ht_0,K,tri,n) + print('time contributions computed.') + [theta_Omega,max1,max2] = my.get_theta_omega(max1,max2,rho_0,rho_p1,betaKE_0,betaKE_p1,n) + print('global convective term computed.') + + # SAVE A POSTERIORI ERROR ESTIMATES + data = [] + data.append(theta_early) + data.append(theta_res0) + data.append(theta_diff0) + data.append(theta_time0) + data.append(theta_Omega) + pickle_name = test+'_fineness'+str(fineness)+'_Nt'+str(Nt)+'_'+interpol+'_a posti error at time step'+str(n)+'.p' + pickle.dump(data,open('pickle_files/'+pickle_name,'wb')) # store data + + + else : + + if n == 1: + + # time-update objects + cc_m1 = cc_0 + v_m1 = v_0 + Laplace_c_m1 = Laplace_c_0 + rho_m1 = rho_0 + rho_0 = rho_p1 + qq0_0 = qq0_p1 + betaKE_0 = betaKE_p1 + ht_m1 = ht_0 + + # load data + pickle_name = test+'_fineness'+str(fineness)+'_Nt'+str(Nt)+'_'+interpol+'_c at time step'+str(n)+'.p' + cc_0 = pickle.load(open('pickle_files/'+pickle_name,'rb')) + + pickle_name = test+'_fineness'+str(fineness)+'_Nt'+str(Nt)+'_'+interpol+'_rho at time step'+str(n+1)+'.p' + [ht_0,rho_p1,qq0_p1,betaKE_p1] = pickle.load(open('pickle_files/'+pickle_name,'rb')) + + # compute c related objects + v_0 = lambda x: my.grad_cfunc(cc_0,tri_dual,K_dual,points_dual,x,oriented_edges,structure,edge_to_index) # piecewiese constant + [lin_vx_0,lin_vy_0] = my.get_linv(K_dual,nppoints_dual,numtri_dual,tri_dual,v_0,nodal_avrg_choice) # get linear coefficients + Laplace_c_0 = lambda x : my.approx_Laplace_cn(lin_vx_0,lin_vy_0,x,oriented_edges,structure,edge_to_index) + + # dummy variables + betaKE_m1 = 0*np.array(betaKE_0) + v_m2 = lambda x : 0*np.array(v_m1(x)) + + + else : + + # time-update objects + v_m2 = v_m1 + v_m1 = v_0 + Laplace_c_m1 = Laplace_c_0 + rho_m1 = rho_0 + rho_0 = rho_p1 + qq0_0 = qq0_p1 + betaKE_m1 = betaKE_0 + betaKE_0 = betaKE_p1 + ht_m1 = ht_0 + + # load data + pickle_name = test+'_fineness'+str(fineness)+'_Nt'+str(Nt)+'_'+interpol+'_c at time step'+str(n)+'.p' + cc_0 = pickle.load(open('pickle_files/'+pickle_name,'rb')) + + pickle_name = test+'_fineness'+str(fineness)+'_Nt'+str(Nt)+'_'+interpol+'_rho at time step'+str(n+1)+'.p' + [ht_0,rho_p1,qq0_p1,betaKE_p1] = pickle.load(open('pickle_files/'+pickle_name,'rb')) + + # compute c related objects + v_0 = lambda x: my.grad_cfunc(cc_0,tri_dual,K_dual,points_dual,x,oriented_edges,structure,edge_to_index) # piecewiese constant + [lin_vx_0,lin_vy_0] = my.get_linv(K_dual,nppoints_dual,numtri_dual,tri_dual,v_0,nodal_avrg_choice) # get linear coefficients + Laplace_c_0 = lambda x : my.approx_Laplace_cn(lin_vx_0,lin_vy_0,x,oriented_edges,structure,edge_to_index) + + + # COMPUTE A POSTERIORI ERROR ESTIMATES + theta_res0 = 
my.get_theta_res(cc_m1,Laplace_c_m1,qq0_0,qq0_p1,betaKE_0,betaKE_p1,ht_0,K,tri,K_dual,tri_dual,points_dual,oriented_edges,structure,edge_to_index) + print('elementwise residual computed.') + theta_diff0 = my.get_theta_diff(qq0_0,betaKE_0,K,tri) + print('grad morley jump terms computed.') + [theta_time0,theta_time1] = my.get_theta_time(qq0_0,qq0_p1,betaKE_0,betaKE_p1,rho_m1,rho_0,rho_p1,ht_m1,ht_0,K,tri,n) + print('time contributions computed.') + conv_terms = my.get_conv_terms(v_0,qq0_p1,betaKE_p1,rho_p1,K,tri) + print('convective term computed.') + [theta_Omega,max1,max2] = my.get_theta_omega(max1,max2,rho_0,rho_p1,betaKE_0,betaKE_p1,n) + print('global convective term computed.') + + # SAVE A POSTERIORI ERROR ESTIMATES + data = [] + data.append(theta_res0) + data.append(theta_diff0) + data.append(theta_time0) + data.append(theta_time1) + data.append(conv_terms) + data.append(theta_Omega) + pickle_name = test+'_fineness'+str(fineness)+'_Nt'+str(Nt)+'_'+interpol+'_a posti error at time step'+str(n)+'.p' + pickle.dump(data,open('pickle_files/'+pickle_name,'wb')) # store data + +progress = 100 +print("%.2f" % progress, 'procent of progress made.') +elapsed = time.time() - tic +print('This took',"%.2f" % round(elapsed/60, 2), 'minutes.') + + diff --git a/2d/exact_error.py b/2d/exact_error.py new file mode 100644 index 0000000..95d8e57 --- /dev/null +++ b/2d/exact_error.py @@ -0,0 +1,201 @@ +import myfun as my +import numpy as np +import time +import pickle + +tic = time.time() + +# ----------------------------------------------- COMPUTE "EXACT" ERROR OF MANUFACTURED SOLUTION ----------------------------------------------------- +# This script computes the LinfL2-L2H1 norm of the "exact" error subject to manufactured solutions. + +# SETTINGS FOR SCHEME +nodal_avrg_choice = 'least-squares' +method = 'wasserstein' # using log-mean convective flux +boundary = 'Neumann' +interpol = 'morley' + +test = 'manufactured1' # 'diff' or 'blow-up' or 'manufactured' +fineness = 3 # number of refinements of mesh before calculating numerical solution +Nt = 8 # number of time steps, [1, 2, 3, 4, 6 ,8 , 16, 32, 64] + +# initialize different test cases; only works for manufactured solutions +if test == 'manufactured' : +# MANUFACTURED SOLUTION WITH T = 0.5 + def rho0(x) : + val = 1.3*np.exp(-60*(x[0]-1/2)**2-60*(x[1]-1/2)**2) + return val + + def exact_rho(x,t) : + val = 1.3/(1+t)*np.exp(-60*(x[0]-1/2)**2-60*(x[1]-1/2)**2) + return val + + def exact_c(x) : + val = 1.3*np.exp(-60*(x[0]-1/2)**2-60*(x[1]-1/2)**2) + return val + + def grad_exact_rho(x,t) : + val = -1.3*120/(1+t)*np.exp(-60*(x[0]-1/2)**2-60*(x[1]-1/2)**2)*np.array([x[0]-1/2,x[1]-1/2]) + return val + + T = 0.5 # time interval [0,T]# + +elif test == 'manufactured1' : +# MANUFACTURED SOLUTION WITH T = 3 + + def rho0(x) : + val = 1.3*np.exp(-60*(x[0]-1/2)**2-60*(x[1]-1/2)**2) + return val + + def exact_rho(x,t) : + val = 1.3/(1+t)*np.exp(-60*(x[0]-1/2)**2-60*(x[1]-1/2)**2) + return val + + def exact_c(x) : + val = 1.3*np.exp(-60*(x[0]-1/2)**2-60*(x[1]-1/2)**2) + return val + + def grad_exact_rho(x,t) : + val = -1.3*120/(1+t)*np.exp(-60*(x[0]-1/2)**2-60*(x[1]-1/2)**2)*np.array([x[0]-1/2,x[1]-1/2]) + return val + + T = 3 # time interval [0,T]# + +else : + print('Warning: Wrong string input for test.') + + +# SCHEME/PROBLEM PARAMETERS +hx = 1/2 +ht = (T)/(Nt) + +# initialize zerost mesh for FV approximations +[tri,points,nppoints,numtri] = my.initialmesh() +K = nppoints[tri.simplices] + +# refine zerost mesh subject to fineness +for i in range(fineness) : + 
hx = 1/2*hx + [tri,points,nppoints,numtri] = my.refinemesh(points, K, numtri) + K = nppoints[tri.simplices] + + +#get dual mesh for FE method to approximate chemical density +[K_dual, tri_dual, points_dual, nppoints_dual, numtri_dual] = my.getdualmesh(points, tri, fineness, K) +K_dual = np.array(K_dual) +[tri_dual,K_dual] = my.orientation_dualmesh(tri_dual,K_dual) # enforce positive orientation of all triangles in dual mesh + +# compute mesh topology for faster find_simplex routine for dual mesh +[oriented_edges,structure,edge_to_index] = my.get_edges_structure(np.array(K_dual)) +my.init_indexE(numtri_dual) + +print('Compute exact error') +print(interpol+'_'+boundary+'_'+method+'_'+test) +print('fineness',fineness) +print('numtri',numtri) +print('hx',hx) +print('ht',ht) +print('numtri_dual',numtri_dual) +print('Nt',Nt) + +# inititlize objects +L2_array = [] +L2H1_sum = 0 + +for n in range(Nt) : # range(Nt) : + + progress = n/(Nt)*100 + print("%.2f" % progress, 'procent of progress made.') + + if n == 0 : # slightly different setting at early times + + # load data + pickle_name = test+'_fineness'+str(fineness)+'_Nt'+str(Nt)+'_'+interpol+'_rho at time step'+str(n)+'.p' + [rho_0,rho0] = pickle.load(open('pickle_files/'+pickle_name,'rb')) + + pickle_name = test+'_fineness'+str(fineness)+'_Nt'+str(Nt)+'_'+interpol+'_rho at time step'+str(n+1)+'.p' + [ht_0,rho_p1,qq0_p1,betaKE_p1] = pickle.load(open('pickle_files/'+pickle_name,'rb')) + + #GET ARTIFICIAL MORLEY0 + [qq0_0,betaKE_0] = my.get_morley0_coeff(rho_0,nppoints,numtri,tri,K) + + else : + + if n == 1: + + # time-update objects + qq0_0 = qq0_p1 + betaKE_0 = betaKE_p1 + + # load data + pickle_name = test+'_fineness'+str(fineness)+'_Nt'+str(Nt)+'_'+interpol+'_rho at time step'+str(n+1)+'.p' + [ht_0,rho_p1,qq0_p1,betaKE_p1] = pickle.load(open('pickle_files/'+pickle_name,'rb')) + + + else : + + # time-update objects + qq0_0 = qq0_p1 + betaKE_0 = betaKE_p1 + + # load data + pickle_name = test+'_fineness'+str(fineness)+'_Nt'+str(Nt)+'_'+interpol+'_rho at time step'+str(n+1)+'.p' + [ht_0,rho_p1,qq0_p1,betaKE_p1] = pickle.load(open('pickle_files/'+pickle_name,'rb')) + + + # COMPUTE RECONSTRUCTION OF MORLEY-TYPE FOR BOTH TIME STEPS, INCLUDUNG ITS GRAD + morley_interpol0 = lambda x : my.morley_interpol(K,tri,qq0_0,betaKE_0,x,indexK=-1) # use rho0 here? 
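+ # note (added remark): the Morley-type reconstructions I^n, I^{n+1} are determined by the nodal coefficients qq0_* and the
+ # edge coefficients betaKE_* of the respective time step; the space-time reconstruction assembled below is affine in time,
+ # i.e. R(x,t) = (t - n*ht)/ht * I^{n+1}(x) + ((n+1)*ht - t)/ht * I^n(x) on [t^n, t^{n+1}]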
+ grad_morley_interpol0 = lambda x : my.grad_morley_interpol(K,tri,qq0_0,betaKE_0,x,indexK=-1) + morley_interpol1 = lambda x : my.morley_interpol(K,tri,qq0_p1,betaKE_p1,x,indexK=-1) + grad_morley_interpol1 = lambda x : my.grad_morley_interpol(K,tri,qq0_p1,betaKE_p1,x,indexK=-1) + + # COMPUTE FULL RECONSTRUCTION (time and space), INCLUDING ITS GRAD + morley_spacetime = lambda x,t : morley_interpol1(x)*(t-n*ht)/((n+1)*ht-n*ht) - morley_interpol0(x)*(t-(n+1)*ht)/((n+1)*ht-n*ht) + aux_error = lambda x,t : (exact_rho(x,t)-morley_spacetime(x,t))**2 # L^2 in space + grad_morley_spacetime = lambda x,t : grad_morley_interpol1(x)*(t-n*ht)/((n+1)*ht-n*ht) - grad_morley_interpol0(x)*(t-(n+1)*ht)/((n+1)*ht-n*ht) + grad_aux_error = lambda x,t : np.linalg.norm(grad_exact_rho(x,t)-grad_morley_spacetime(x,t))**2 # H^1 in space + + # COMPUTE L2 NORM in space + aux = lambda x : aux_error(x,n*ht) # the maximum is attaind at time steps t^n = n*ht + intL2 = my.get_integral_Omega(aux) # integral over total domain + L2_array.append(intL2) + + if n == Nt-1 : + aux = lambda x : aux_error(x,(n+1)*ht) # the maximum is attaind at time steps t^n = n*ht + intL2 = my.get_integral_Omega(aux) # integral over total domain + L2_array.append(intL2) + + # COMPUTE L2H1 NORM + maxiter = np.lcm.reduce([1, 2, 3, 4, 6 ,8 , 16, 32, 64]) # get maxiter as lowest common multiple of numbers of time steps used + for m in range(int(maxiter/Nt)) : # subintervals + I = np.array([(maxiter/Nt*n+m)*T/maxiter,((maxiter/Nt*n+m)+1)*T/maxiter]) + + #quadrature rule on each subinterval + areaI = I[1]-I[0] + trafo = lambda y : y*(I[1]-I[0])/2+(I[1]+I[0])/2 # y in [-1,1] + ti = np.array([-np.sqrt(1/3),np.sqrt(1/3)]) #on [-1,1] + wi = [1,1] + val = 0 + for k in range(len(wi)) : + grad_aux = lambda x : grad_aux_error(x,trafo(ti[k])) + intH1 = my.get_integral_Omega(grad_aux) + val += wi[k]*intH1 + + integral = val/2*areaI # get integral + + L2H1_sum += np.array(integral) + +LinfL2 = np.max(L2_array) # compute Linf in time + +# SAVE 'EXACT' ERROR IN LinfL2-L2H1 NORM +data = [] +data.append(L2_array) +data.append(LinfL2) +data.append(L2H1_sum) +print(data) +pickle_name = test+'_fineness'+str(fineness)+'_Nt'+str(Nt)+'_'+interpol+'_exact error.p' +pickle.dump(data,open('pickle_files/'+pickle_name,'wb')) # store data + +progress = 100 +print("%.2f" % progress, 'procent of progress made.') +elapsed = time.time() - tic +print('This took',"%.2f" % round(elapsed/60, 2), 'minutes.') \ No newline at end of file diff --git a/2d/main_morley.py b/2d/main_morley.py new file mode 100644 index 0000000..929dc8a --- /dev/null +++ b/2d/main_morley.py @@ -0,0 +1,304 @@ +import myfun as my +import numpy as np +import time +import pickle + +# ---------------------------------------------------- FV-FE ALGORITHM ------------------------------------------------------------------------- +# This script performs a FV-FE Algroithm, displayed in the master thesis, for the parabolic-elliptic Keller-Segel system with an initial datum and +# the homogeneous Neumann boundary condition + +tic = time.time() + +# SETTINGS FOR SCHEME +nodal_avrg_choice = 'least-squares' # 'arithmetic-mean' or 'least-squares' -> see nasa paper +method = 'wasserstein' # using log-mean convective flux +boundary = 'Neumann' +interpol = 'morley' + +test = 'manufactured1' #'diff', 'blow-up', 'manufactured' or 'manufactured1' +fineness = 3 # number of refinements of mesh before calculating numerical solution +Nt = 8 # number of time steps + +print('FV-FE Algorithm') +print(interpol+'_'+boundary+'_'+method+'_'+test) 
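+# For orientation: the continuous problem is the parabolic-elliptic Keller-Segel system with homogeneous Neumann
+# boundary conditions on Omega = (0,1)^2; judging from the manufactured source terms f and g defined below, it reads
+#   d/dt rho + div(rho*grad c) - Laplace rho = f,   c - Laplace c - rho = g.
+# In each time step rho is advanced by the FV scheme on the primal mesh and c is recomputed by the affine linear FE
+# scheme on the dual mesh.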
+print('fineness',fineness) +print('Nt',Nt) + +# initialize different test cases +if test == 'blow-up' : +# CONVECTION DEOMINATED REGIME i.e. BLOW-UP + + def rho0(x) : + val = 10**(3)*np.exp(-((x[0]-0.5)**2+(x[1]-0.5)**2)/10**(-2)) + return val + + def f(x,t) : + val = 0 + return val + + def g(x,t) : + val = 0 + return val + + T = 0.0045 # time interval [0,T] + +elif test == 'diff' : +# DIFFUSION DOMINATED REGIME + def rho0(x) : + val = 1.3*np.exp(-25*(x[0]-1/2)**2-25*(x[1]-1/2)**2) + return val + + def f(x,t) : + val = 0 + return val + + def g(x,t) : + val = 0 + return val + + T = 0.05 # time interval [0,T]# + +elif test == 'manufactured' : +# MANUFACTURED SOLUTION WITH T = 0.5 + def rho0(x) : + val = 1.3*np.exp(-60*(x[0]-1/2)**2-60*(x[1]-1/2)**2) + return val + + def exact_rho(x,t) : + val = 1.3/(1+t)*np.exp(-60*(x[0]-1/2)**2-60*(x[1]-1/2)**2) + return val + + def exact_c(x) : + val = 1.3*np.exp(-60*(x[0]-1/2)**2-60*(x[1]-1/2)**2) + return val + + def delt_exact_rho(x,t) : + val = -1.3/(1+t)**2*np.exp(-60*(x[0]-1/2)**2-60*(x[1]-1/2)**2) + return val + + def grad_exact_rho(x,t) : + val = -1.3*120/(1+t)*np.exp(-60*(x[0]-1/2)**2-60*(x[1]-1/2)**2)*np.array([x[0]-1/2,x[1]-1/2]) + return val + + def grad_exact_c(x) : + val = -1.3*120*np.exp(-60*(x[0]-1/2)**2-60*(x[1]-1/2)**2)*np.array([x[0]-1/2,x[1]-1/2]) + return val + + def Laplacian_exact_rho(x,t) : + val = 1/(1+t)*np.exp(-60*(x[0]-1/2)**2-60*(x[1]-1/2)**2)*(9048-18720*x[0]+18720*x[0]**2-18720*x[1]+18720*x[1]**2) + return val + + def Laplacian_exact_c(x) : + val = np.exp(-60*(x[0]-1/2)**2-60*(x[1]-1/2)**2)*(9048-18720*x[0]+18720*x[0]**2-18720*x[1]+18720*x[1]**2) + return val + + + def f(x,t) : + val = delt_exact_rho(x,t) + np.dot(grad_exact_c(x),grad_exact_rho(x,t)) + exact_rho(x,t)*Laplacian_exact_c(x) - Laplacian_exact_rho(x,t) + return val + + def g(x,t) : + val = exact_c(x) - Laplacian_exact_c(x) - exact_rho(x,t) + return val + + T = 0.5 # time interval [0,T]# + +elif test == 'manufactured1' : +# MANUFACTURED SOLUTION WITH T = 3 + + def rho0(x) : + val = 1.3*np.exp(-60*(x[0]-1/2)**2-60*(x[1]-1/2)**2) + return val + + def exact_rho(x,t) : + val = 1.3/(1+t)*np.exp(-60*(x[0]-1/2)**2-60*(x[1]-1/2)**2) + return val + + def exact_c(x) : + val = 1.3*np.exp(-60*(x[0]-1/2)**2-60*(x[1]-1/2)**2) + return val + + def delt_exact_rho(x,t) : + val = -1.3/(1+t)**2*np.exp(-60*(x[0]-1/2)**2-60*(x[1]-1/2)**2) + return val + + def grad_exact_rho(x,t) : + val = -1.3*120/(1+t)*np.exp(-60*(x[0]-1/2)**2-60*(x[1]-1/2)**2)*np.array([x[0]-1/2,x[1]-1/2]) + return val + + def grad_exact_c(x) : + val = -1.3*120*np.exp(-60*(x[0]-1/2)**2-60*(x[1]-1/2)**2)*np.array([x[0]-1/2,x[1]-1/2]) + return val + + def Laplacian_exact_rho(x,t) : + val = 1/(1+t)*np.exp(-60*(x[0]-1/2)**2-60*(x[1]-1/2)**2)*(9048-18720*x[0]+18720*x[0]**2-18720*x[1]+18720*x[1]**2) + return val + + def Laplacian_exact_c(x) : + val = np.exp(-60*(x[0]-1/2)**2-60*(x[1]-1/2)**2)*(9048-18720*x[0]+18720*x[0]**2-18720*x[1]+18720*x[1]**2) + return val + + + def f(x,t) : + val = delt_exact_rho(x,t) + np.dot(grad_exact_c(x),grad_exact_rho(x,t)) + exact_rho(x,t)*Laplacian_exact_c(x) - Laplacian_exact_rho(x,t) + return val + + def g(x,t) : + val = exact_c(x) - Laplacian_exact_c(x) - exact_rho(x,t) + return val + + T = 3 # time interval [0,T]# + +else : + print('Warning: Wrong string input for test.') + +toc = time.time() + +# SCHEME/PROBLEM PARAMETERS +hx = 1/2 +ht = (T)/(Nt) + +# GET PRIMAL MESH : +# initialize zerost mesh for FV approximations +[tri,points,nppoints,numtri] = my.initialmesh() +K = 
nppoints[tri.simplices] + +# refine zerost mesh subject to fineness +for i in range(fineness) : + hx = 1/2*hx + [tri,points,nppoints,numtri] = my.refinemesh(points, K, numtri) + K = nppoints[tri.simplices] + +#get dual mesh for FE method to approximate chemical density +[K_dual, tri_dual, points_dual, nppoints_dual, numtri_dual] = my.getdualmesh(points, tri, fineness, K) +K_dual = np.array(K_dual) + +[tri_dual,K_dual] = my.orientation_dualmesh(tri_dual,K_dual) # enforce positive orientation of all triangles in dual mesh + +# compute mesh topology for faster find_simplex routine for dual mesh +[oriented_edges,structure,edge_to_index] = my.get_edges_structure(np.array(K_dual)) +my.init_indexE(numtri_dual) + +elapsed = time.time() - toc +print('Mesh topology computed in ',"%.2f" % round(elapsed/60, 2), 'minutes.') +toc = time.time() + +# print most important information regarding problem/scheme +print('numtri',numtri) +print('numtri_dual',numtri_dual) +print('Nt',Nt) +print('hx',hx) +print('ht',ht) + +# pre-allocate variables +rho = np.zeros([Nt+1,numtri]) +cc = np.zeros([Nt+1,len(points)]) + +toc = time.time() + +A = my.assemble_FE_matrix1(tri_dual,K_dual,points_dual) +elapsed = time.time() - toc +print('FE matrix assembled in ',"%.2f" % round(elapsed/60, 2), 'minutes.') +toc = time.time() + +# calculate c0 with rho0 as right-hand side +c0 = my.getc_FE1(rho0,lambda x: g(x,0) ,tri_dual,K_dual,points_dual,A) +cc[0][:] = c0 +elapsed = time.time() - toc +print('Initial chemical concentration approximated via FE scheme in ',"%.2f" % round(elapsed/60, 2), 'minutes.') +toc = time.time() + +# get discrete rho0 by evaluating at circumcenters x_K +for i in range(len(K)) : + CC = my.circumcenter(K[i]) + rho[0][i] = rho0(CC) + +# SAVE INITIAL RHO +data = [] +data.append(rho[0][:]) +data.append(rho0) +pickle_name = test+'_fineness'+str(fineness)+'_Nt'+str(Nt)+'_'+interpol+'_rho at time step'+str(0)+'.p' +pickle.dump(data,open('pickle_files/'+pickle_name,'wb')) # store data + +hht = [0] + +#flag = False # for adaptive time stepping +for n in range(Nt) : # range(Nt) : + toc = time.time() + + progress = n/Nt*100 + print("%.2f" % progress, 'procent of progress made.') + + # SAVE DATA CHEMICAL CONCENTRATION + pickle_name = test+'_fineness'+str(fineness)+'_Nt'+str(Nt)+'_'+interpol+'_c at time step'+str(n)+'.p' + pickle.dump(cc[n][:],open('pickle_files/'+pickle_name,'wb')) # store data + + # GET RHO VIA FINITE VOLUME SCHEME + # set v = gradient c + v = lambda x: my.grad_cfunc(cc[n][:],tri_dual,K_dual,points_dual,x,oriented_edges,structure,edge_to_index) # piecewiese constant + elapsed = time.time() - toc + print('Convection term initialized via v in ',"%.2f" % round(elapsed/60, 2), 'minutes.') + toc = time.time() + + # # ADAPTIVE TIME STEPPING + # if (T-hht[-1]) < ht : # make sure we do not overshoot time interval [0,T] + # ht = np.abs(T-hht[-1]) + # flag = True + # else : + # ht = my.get_timestep(v,K,tri) + + # hht.append(hht[-1]+ht) # list of t^n + # print('time-step',hht) + + # EXECUTE FINITE VOLUME SCHEME + rho[n+1][:] = my.getrho(rho[n][:],lambda x : f(x,n*ht),ht,v,tri,K,numtri) + elapsed = time.time() - toc + print('Bacterial denisty computed via FV scheme in ', "%.2f" % round(elapsed/60, 2), 'minutes.') + toc = time.time() + + # GET COEFFICIENTS FOR MORLEY INTERPOLATION : + FKED = my.getinterpolationRHS(tri,K,rho[n+1][:]) + elapsed = time.time() - toc + print('RHS for Morley coefficients calculated in ', "%.2f" % round(elapsed/60, 2), 'minutes.') + toc = time.time() + + qq0 = 
my.getq0(K,nppoints,numtri,tri,rho[n+1][:],nodal_avrg_choice) # linear interpolation
+ elapsed = time.time() - toc
+ print('Linear interpolation q0 computed in ', "%.2f" % round(elapsed/60, 2), 'minutes.')
+ toc = time.time()
+
+ betaKE = my.getbetaE(K,qq0,FKED) # Jacobi_cdotn is first derivative of componentwise linear interpolation of grad c
+ elapsed = time.time() - toc
+ print('Morley coefficients computed in ', "%.2f" % round(elapsed/60, 2), 'minutes.')
+ toc = time.time()
+
+ # GET INTERPOLANT FOR RHO :
+ if test == 'manufactured1' or test == 'manufactured' :
+ interpol_rho = lambda x : 0
+ else :
+ interpol_rho = lambda x: my.morley_interpol(K,tri,qq0,betaKE,x,indexK=-1) # morley interpolation
+
+ # SAVE BACTERIAL DENSITY RHO
+ data = []
+ data.append(ht) # time step size Delta t^n := t^{n+1} - t^n
+ data.append(rho[n+1][:])
+ data.append(qq0)
+ data.append(betaKE)
+ pickle_name = test+'_fineness'+str(fineness)+'_Nt'+str(Nt)+'_'+interpol+'_rho at time step'+str(n+1)+'.p'
+ pickle.dump(data,open('pickle_files/'+pickle_name,'wb')) # store data
+
+ # GET CHEMICAL DENSITY
+ cc[n+1][:] = my.getc_FE1(interpol_rho,lambda x : g(x,(n+1)*ht),tri_dual,K_dual,points_dual,A)
+ elapsed = time.time() - toc
+ print('Chemical concentration approximated via FE scheme in ', "%.2f" % round(elapsed/60, 2), 'minutes.')
+
+ if n == Nt-1 :
+ # SAVE DATA CHEMICAL CONCENTRATION
+ pickle_name = test+'_fineness'+str(fineness)+'_Nt'+str(Nt)+'_'+interpol+'_c at time step'+str(n+1)+'.p'
+ pickle.dump(cc[n][:],open('pickle_files/'+pickle_name,'wb')) # store data
+
+progress = 100
+print("%.2f" % progress, 'percent of progress made.')
+elapsed = time.time() - tic
+print('This took',"%.2f" % round(elapsed/60, 2), 'minutes.')
+
diff --git a/2d/myfun.py b/2d/myfun.py
new file mode 100644
index 0000000..11ae8ac
--- /dev/null
+++ b/2d/myfun.py
@@ -0,0 +1,1837 @@
+import numpy as np
+from scipy.spatial import Delaunay
+from scipy.sparse import csr_matrix
+from matplotlib import pyplot as plt
+from scipy.sparse.linalg import spsolve
+
+
+
+# GENERAL FUNCTIONS
+
+def initialmesh() :
+# input : None
+# output : a very coarse triangulation (mesh size = 1/2) with the property that all inner angles of the triangles are < pi/2
+
+ points = [[0,0],[1,0],[0,1],[1,1]]
+ nppoints = np.array(points)
+
+ points.append(1/2*(nppoints[0]+nppoints[1]))
+ points.append(1/2*(nppoints[0]+nppoints[2]))
+ points.append(1/2*(nppoints[3]+nppoints[1]))
+ points.append(1/2*(nppoints[3]+nppoints[2]))
+ points.append(1/3*(nppoints[0]+nppoints[1]+nppoints[2]))
+ points.append(1/3*(nppoints[3]+nppoints[1]+nppoints[2]))
+ points.append(29/96*nppoints[1]+67/96*nppoints[2]) # where we use 7/24 as the mean of 1/3 and 1/4, to avoid angles of 90°
+ points.append(67/96*nppoints[1]+29/96*nppoints[2])
+
+ nppoints = np.array(points)
+
+ tri = Delaunay(points) # d=2
+ # tri.simplices gives indices that form the triangles
+ # points[tri.simplices] gives vertices of the mesh elements
+ # see documentation: https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.Delaunay.html
+
+ numtri = 14 # number of triangles in mesh
+
+ return [tri, points, nppoints, numtri]
+
+def refinemesh(points, K, numtri) :
+# input : points that occur in triangulation, triangulation K (array of triangles), numtri number of triangles in triangulation
+# output : Delaunay triangulation tri, points, nppoints (=points in format np.array), number of triangles numtri
+ for i in range(len(K)) :
+ newpoints = [1/2*(K[i][0]+K[i][1]), 1/2*(K[i][0]+K[i][2]), 1/2*(K[i][1]+K[i][2])]
+ numtri += 3
+ 
for j in range(3) : + points.append(newpoints[j]) + + auxtpl = list(set([tuple(x) for x in points])) # remove duplicates + points = [list(ele) for ele in auxtpl] + nppoints = np.array(points) + + tri = Delaunay(points) + K = nppoints[tri.simplices] + + return [tri, points,nppoints,numtri] + + +def getangles(K): +# need this for function circumcenter(...) + +# input : triangle K +# output : radian angles of triangle order as the vertices in K, number of triangles in mesh numtri + + A = K[0] + B = K[1] + C = K[2] + + # Square of lengths be a2, b2, c2 + a2 = lengthSquare(B, C) + b2 = lengthSquare(A, C) + c2 = lengthSquare(A, B) + + # length of sides be a, b, c + a = np.sqrt(a2); + b = np.sqrt(b2); + c = np.sqrt(c2); + + # From Cosine law + alpha = np.arccos((b2 + c2 - a2)/(2 * b * c)); + beta = np.arccos((a2 + c2 - b2)/(2 * a * c)); + gamma = np.arccos((a2 + b2 - c2)/(2 * a * b)); + + angles = [alpha, beta, gamma] + + return angles + +def lengthSquare(X, Y): +# need this for function circumcenter(...) + +# input : points X and Y +# output : squared distance between X and Y + + xDiff = X[0] - Y[0] + yDiff = X[1] - Y[1] + + return xDiff * xDiff + yDiff * yDiff + +def circumcenter(K) : +# input : triangle K +# output : circumcenter CC, which can be characterized as the intersection of the perpendicular bisectors of the triangle K + + angles = getangles(K) + + # circumcenter forular from https://testbook.com/maths/circumcenter-of-a-triangle + CC = np.array([(K[0][0]*np.sin(2*angles[0])+K[1][0]*np.sin(2*angles[1])+K[2][0]*np.sin(2*angles[2])), + (K[0][1]*np.sin(2*angles[0])+K[1][1]*np.sin(2*angles[1])+K[2][1]*np.sin(2*angles[2]))]) + CC = 1/(np.sin(2*angles[0])+np.sin(2*angles[1])+np.sin(2*angles[2]))*CC + + return CC + + +def are_neighbors(Ki,Li) : + # need this for function getdualmesh(...) + + # input: triangles Ki and Li + # output: true or false, indicating if Ki and Li are neighbors + + aux = len(list(set([Ki[0],Ki[1],Ki[2],Li[0],Li[1],Li[2]]))) + + if aux == 3 : + print('Warning: Triangles are the same') + flag = False + + elif aux == 4 : # neighbors by edge + flag = True + + elif aux == 5 : # neighbors by node + flag = False + + else : # no neighbors + flag = False + + return flag + +def linear_search(list, x): + # need this for function getdualmesh(...) + # basic line search algorithm for 2 dim vectors + for i in range(len(list)): + if list[i][0] == x[0] and list[i][1] == x[1]: + return i + return -1 + +def linear_search_edges(list, x): + # need this for function find_simlex1(...) + # basic line search algorithm for edges + for i in range(len(list)): + if list[i][0][0] == x[0][0] and list[i][0][1] == x[0][1] and list[i][1][0] == x[1][0] and list[i][1][1] == x[1][1]: + return i + return -1 + +def getdualmesh(points, tri, fineness, K): + # input: primal mesh and some of its parameters + # output: dual mesh for the FE scheme to induce no jumps for (primal) normal derivatives + + points_primal = len(points) + + hx = 1/2**(fineness+1) + points_dual = points + nppoints = np.array(points) # important that we initialite this here as list points is changed below... 
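+ # construction sketch (added remark): the dual vertex set consists of all primal vertices plus the circumcenters of all
+ # primal elements (appended below); each interior dual triangle connects a primal vertex with the circumcenters of two
+ # edge-adjacent primal elements containing it, and additional triangles are appended along the boundary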
+ + for i in range(len(K)) : + points_dual.append(circumcenter(K[i])) + + nppoints_dual = np.array(points_dual) + + pt_i = -1 + K_dual = [] + + class myclass : + def __init__(self) : + self.simplices = [] + + tri_dual = myclass() + + for pt in nppoints : # go through all points in primal mesh + pt_i +=1 + + neighbors = find_neighbors(pt_i,tri) + taken_care_of = [] + + for i in neighbors : + taken_care_of.append(i) + + for j in neighbors : + + if j in taken_care_of : + continue + + if are_neighbors(tri.simplices[i],tri.simplices[j]) : + CCi = circumcenter(K[i]) + CCj = circumcenter(K[j]) + + K_dual.append([pt,CCi,CCj]) + tri_dual.simplices.append([pt_i,points_primal+i,points_primal+j]) + + # extra triangles on boundary + if pt[0] == 0 : # bdr edge x = 0 + if pt[1] == 0: # corner [0,0] + x = pt+np.array([hx,0]) + y = pt+np.array([0,hx]) + + midx = 1/2*(pt+x) + midy = 1/2*(pt+y) + + i = tri.find_simplex(midx+10**-8*np.array([0,1])) + j = tri.find_simplex(midy+10**-8*np.array([1,0])) + + CCi = circumcenter(K[i]) + CCj = circumcenter(K[j]) + + K_dual.append([pt,x,CCi]) + x_i = linear_search(nppoints,x) + tri_dual.simplices.append([pt_i,x_i,points_primal+i]) + + K_dual.append([pt,y,CCj]) + y_i = linear_search(nppoints,y) + tri_dual.simplices.append([pt_i,y_i,points_primal+j]) + + elif pt[1] == 1 : # corner [0,1] + x = pt+np.array([hx,0]) + + midx = 1/2*(pt+x) + + i = tri.find_simplex(midx-10**-8*np.array([0,1])) + + CCi = circumcenter(K[i]) + x_i = linear_search(nppoints,x) + tri_dual.simplices.append([pt_i,x_i,points_primal+i]) + K_dual.append([pt,x,CCi]) + + else : # no corner + x = pt+np.array([0,hx]) + + midx = 1/2*(pt+x) + + i = tri.find_simplex(midx+10**-8*np.array([1,0])) + + CCi = circumcenter(K[i]) + x_i = linear_search(nppoints,x) + tri_dual.simplices.append([pt_i,x_i,points_primal+i]) + K_dual.append([pt,x,CCi]) + + elif pt[0] == 1 : # bdr edge x = 1 + + if pt[1] == 0: # corner [1,0] + y = pt+np.array([0,hx]) + + midy = 1/2*(pt+y) + + j = tri.find_simplex(midy-10**-8*np.array([1,0])) + + CCj = circumcenter(K[j]) + y_i = linear_search(nppoints,y) + tri_dual.simplices.append([pt_i,y_i,points_primal+j]) + K_dual.append([pt,y,CCj]) + + elif pt[1] == 1 : # corner [1,1] + nothing = 1 + + else : # no corner + x = pt+np.array([0,hx]) + + midx = 1/2*(pt+x) + + i = tri.find_simplex(midx-10**-8*np.array([1,0])) + + CCi = circumcenter(K[i]) + x_i = linear_search(nppoints,x) + tri_dual.simplices.append([pt_i,x_i,points_primal+i]) + K_dual.append([pt,x,CCi]) + + elif pt[1] == 0: # bdr edge y = 0 + x = pt+np.array([hx,0]) + + midx = 1/2*(pt+x) + + i = tri.find_simplex(midx+10**-8*np.array([0,1])) + + CCi = circumcenter(K[i]) + x_i = linear_search(nppoints,x) + tri_dual.simplices.append([pt_i,x_i,points_primal+i]) + K_dual.append([pt,x,CCi]) + + elif pt[1] == 1: # bdr edge y = 1 + x = pt+np.array([hx,0]) + + midx = 1/2*(pt+x) + + i = tri.find_simplex(midx-10**-8*np.array([0,1])) + + CCi = circumcenter(K[i]) + x_i = linear_search(nppoints,x) + tri_dual.simplices.append([pt_i,x_i,points_primal+i]) + K_dual.append([pt,x,CCi]) + + + numtri_dual = len(K_dual) + + return [K_dual, tri_dual, points_dual, nppoints_dual, numtri_dual] + +def orientation_dualmesh(tri,K) : +# input: dual mesh +# output: dual mesh, s.t. all triangles are oriented positively, i.e. 
the vertices that define a triangle are ordered counterclockwise + + poss = [[0,1,2],[0,2,1],[1,0,2],[1,2,0],[2,1,0],[2,0,1]] + for i in range(len(K)) : + for k in range(12) : + j = np.mod(k,6) + flag = -2 + + tau1 = K[i][poss[j][1]]-K[i][poss[j][0]] + tau2 = K[i][poss[j][2]]-K[i][poss[j][0]] + + if (tau1[0] > 0 and tau1[1] > 0) : + flag += 1 + + if k < 6 : + tau1_perp1 = [-tau1[1],tau1[0]] + else : + tau1_perp1 = [tau1[1],-tau1[0]] + + if np.dot(tau1_perp1,tau2) > 0 : + flag += 1 + + if flag == 0 : + + if k < 6 : + oldtri = np.array(tri.simplices[i]) + K[i] = K[i][poss[j]] + tri.simplices[i] = oldtri[poss[j]] + else : + aux_poss = [poss[j][0],poss[j][2],poss[j][1]] + oldtri = np.array(tri.simplices[i]) + K[i] = K[i][aux_poss] + tri.simplices[i] = oldtri[aux_poss] + + break + + elif k == 11 : + print('Warning: orientation does not work') + + return [tri,K] + + +def RightOf(x,E) : + # need tis for function get_edges_structure(...) + + # says if a point is on the right of an oriented edge + val = (E[1][0] - E[0][0])*(x[1] - E[0][1]) - (E[1][1] - E[0][1])*(x[0] - E[0][0]) + if val < 0 : + return True + else : + return False + +def get_edges_structure(K) : +# need this for more efficient find_simplex1 for the dual mesh + +# input: dual mesh in terms of K +# output: mesh topology of the dual mesh + + oriented_edges = [] + structure = [] + edge_to_index = [] + + for i in range(len(K)) : + indices = [[1,2],[0,2],[0,1]] + for j in range(3) : + + E = K[i][indices[j]] + pt = K[i][j] + + if RightOf(pt,E) : + E = [E[1],E[0]] + + oriented_edges.append(E) + structure.append([[E[0],pt],[pt,E[1]]]) + edge_to_index.append(i) + + return [oriented_edges,structure,edge_to_index] + + +def find_neighbors(pindex, tri): +# input : index of point pindex, delaunay triangulation tri +# output : indices of triangles that point pindex is part of + + #neighborstri = [] + neighborstri_index = [] + i = -1 + for simplex in tri.simplices: + i += 1 + if pindex in simplex: + #neighborstri.append(simplex) + neighborstri_index.append(i) + + return neighborstri_index + + +def min_dist(E,x) : +# need this for function find_simplex1(...) 
+ # get minimal distance between edge E and point x + val = np.abs(np.cross(E[1]-E[0], E[0]-x))/np.linalg.norm(E[1]-E[0]) + return val + +def find_simplex1(x,oriented_edges,structure,indexE) : +#input: point x, mesh topology of the dual mesh +# output: edge E on which x lies or x lies in the triangle left of E + + # algorithm cannot deal with edges so we weak it a bit + if x[0] == 0 : + x[0] = 10**-10 + if x[1] == 0 : + x[1] = 10**-10 + if x[0] == 1 : + x[0] = 1-10**-10 + if x[1] == 1 : + x[1] = 1-10**-10 + + E = oriented_edges[indexE] + + if RightOf(x, E) : + E = [E[1],E[0]] + + indexE = linear_search_edges(oriented_edges,E) + + list_indices = [] + + while True : + if indexE in list_indices : + print('x',x) + print('Warning: loop.') + break + list_indices.append(indexE) + + if (x[0] == E[0][0] and x[1] == E[0][1]) or (x[0] == E[1][0] and x[1] == E[1][1]) : + return indexE + else : + whichop = 0 + if not(RightOf(x, structure[indexE][0])) : # Onext + whichop += 1 + if not(RightOf(x, structure[indexE][1])) : #Dprev + whichop += 2 + + if whichop == 0 : + return indexE + elif whichop == 1: + E = structure[indexE][0] # Onext + elif whichop == 2: + E = structure[indexE][1] # Dprev + elif whichop == 3: + if min_dist(structure[indexE][0], x) < min_dist(structure[indexE][1], x) : + E = structure[indexE][0] # Onext + else : + E = structure[indexE][1] # Dprev + + indexE = linear_search_edges(oriented_edges,E) + +def init_indexE(numtri_dual) : +# initialize global variable indexE for more performent find_simplex1 + global indexE + indexE = round(3*numtri_dual/2) + # global counter + # counter = np.zeros(3*numtri_dual) + print('indexE',indexE) + + +def unitouternormal(K,E) : +# input: element K and edge E in terms of vertices, 2-dim vector x that lies on E +# output: unit outward normal vector to K along E + + # calculate normal + edge = E[1]-E[0] + normal = np.array([edge[1],-edge[0]]) # np.dot(normal,edge) =!= 0 + normal = 1/np.linalg.norm(normal)*normal # normalization + + # check for orientation + center = (K[0]+K[1]+K[2])/3 # convex compination of vertices (always in interior triangle) + lengthplus = np.linalg.norm((E[0]+E[1])/2+normal-center) + lengthminus = np.linalg.norm((E[0]+E[1])/2-normal-center) + + if lengthplus<lengthminus : + normal = -normal + + return normal + + +def tritrafo(K,L,pt) : +# need this for integrating via function avrgK(...) 
+ +# input : triangles K and L, point pt from K +# output : pt transformed to L + + A = np.array([[K[0][0], K[1][0], K[2][0]], # | xa1 xa2 xa3 | + [K[0][1], K[1][1], K[2][1]], #A =| ya1 ya2 ya3 | + [ 1, 1, 1]]) # | 1 1 1 | + B = np.array([[L[0][0], L[1][0], L[2][0]], # | xa1 xa2 xa3 | + [L[0][1], L[1][1], L[2][1]], #A =| ya1 ya2 ya3 | + [ 1, 1, 1]]) # | 1 1 1 | + + + invA = np.linalg.solve(A,np.eye(3)) + #invA = np.linalg.inv(A) + M = np.matmul(B,invA) + + auxx = np.array([pt[0],pt[1],1]) + + auxtrafo = np.matmul(M,auxx) + trafo = [auxtrafo[0],auxtrafo[1]] + + return [trafo,M] + +def avrgK(K,g) : +# input : element K in terms of vertices, function g +# output : average of g:IR²->IR over K, area of K + S = np.cross(K[1]-K[0],K[2]-K[0]) # B = K[0], C = K[1], A = K[2] + areaK = np.abs(S)/2 # get area of triangle K + + # wi = [1] (exactness 1 -> Order 2) + # xi = np.array([[1/3,1/3]]) + + # from https://mathsfromnothing.au/triangle-quadrature-rules/?i=1 + # wi = [0.223381589678011,0.223381589678011,0.223381589678011,0.109951743655322,0.109951743655322,0.109951743655322] + # xi = np.array([[0.445948490915965,0.108103018168070], + # [0.445948490915965,0.445948490915965], + # [0.108103018168070,0.445948490915965], + # [0.091576213509771,0.816847572980459], + # [0.091576213509771,0.091576213509771], + # [0.816847572980459,0.091576213509771]]) + + + wi = [1/6,1/6,1/6] # weights*area_unitK(=1/2) (Exactness 2 -> Order 3) + xi = np.array([[1/6,2/3],[1/6,1/6],[2/3,1/6]]) # points in unit triangle + + unitK = [[0,0],[1,0],[0,1]] + + val = 0 + for i in range(len(wi)) : + [trafoxi,M] = tritrafo(unitK,K,xi[i]) + val += wi[i]*g(trafoxi) + + avrg = val*2 + integral = avrg*areaK + + return [integral,avrg,areaK] + +def avrgE(E,g) : + # input : edge E in terms of vertices, function g + # output : average of g:IR²->IR over E, area of E + + areaE = np.linalg.norm(E[0]-E[1]) + + trafo = lambda y : y*(E[1]-E[0])/2+(E[1]+E[0])/2 # y in [-1,1] + + xi = np.array([-np.sqrt(1/3),np.sqrt(1/3)]) #on [-1,1] + wi = [1,1] + + # xi = np.array([-np.sqrt(3/5),0,np.sqrt(3/5)]) + # wi = [5/9,8/9,5/9] + + val = 0 + for k in range(len(wi)) : + val += wi[k]*g(trafo(xi[k])) # get integral + + avrg = val/2 # get average + integral = avrg*areaE + + return [integral,avrg,areaE] + +def quadrature(I,g) : +# inupt: interval I = [a,b], function g +# output: approximated integral of g over I + + areaI = I[1]-I[0] + + trafo = lambda y : y*(I[1]-I[0])/2+(I[1]+I[0])/2 # y in [-1,1] + + xi = np.array([-np.sqrt(1/3),np.sqrt(1/3)]) #on [-1,1] + wi = [1,1] + + # xi = [0] + # wi = [2] + + # xi = np.array([-np.sqrt(3/5),0,np.sqrt(3/5)]) #on [-1,1] + # wi = [5/9,8/9,5/9] + + val = 0 + for k in range(len(wi)) : + val += wi[k]*g(trafo(xi[k])) # get integral + + avrg = val/2 # get average + integral = avrg*areaI + + return integral + + +def get_integral_Omega(g) : +# input: function g +# output: integral of g over Omega + + # get initial mesh for quadrature (different to one that we use to calculate numerical solution to avoid unwanted effects due to projection errors) + points = [[0,0],[1,0],[0.5,0.5],[0,1],[1,1]] + nppoints = np.array(points) + tri = Delaunay(points) + K = nppoints[tri.simplices] + numtri = 2 + + # refine the mesh until it is finer than mesh used to calculate numerical solution, here 5 + for i in range(5) : + [tri,points,nppoints,numtri] = refinemesh(points, K, numtri) + K = nppoints[tri.simplices] + + # calculate integral over Omega as sum over inetrgals on K + integral = 0 + for i in range(numtri) : + [aux,avrg,areaK] = avrgK(K[i],g) 
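+ # avrgK returns [integral over K[i], average over K[i], area of K[i]]; only the elementwise integral is accumulated here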
+ integral += aux + + return integral + + +def newton_method(f,grad_f,x0,tol,maxiter) : +# classical newton method +# Input: function f, Jacobian of f in sparse form, starting point x0, tolerance tol and maximal amount of iteration maxiter +# output: root of f + + xnew = x0 + for i in range(maxiter) : + xold = xnew + + delx = spsolve(grad_f(xold), -f(xold), permc_spec=None, use_umfpack=True) + + xnew = xold + delx + + if np.linalg.norm(xold-xnew) < tol : + break + + print('Newton steps:',i) + + if i == maxiter-1 : + print('Warning: Newton-method reached maxiter.') + + return xnew + + +def bubbleK(K,pt) : +# input : triangle K, point pt +# output : element bubble function + + # get reference bubble functions on unit triangle + refb1 = lambda x: 1-x[0]-x[1] + refb2 = lambda x: x[0] + refb3 = lambda x: x[1] + refb = lambda x: refb1(x)*refb2(x)*refb3(x)*27 + + # transform the triangle to get general bubble function + unitK = [[0,0],[1,0],[0,1]] + [trafo,M] = tritrafo(K,unitK,pt) + + val = refb(trafo) + + return val + +def gradbubbleK(K,pt) : +# input : triangle K, point x +# output : gradient of element bubble function + + # get reference bubble functions on unit triangle + refb1 = lambda x: 1-x[0]-x[1] + refb2 = lambda x: x[0] + refb3 = lambda x: x[1] + gradrefb1 = lambda x: [-1,-1] + gradrefb2 = lambda x: [1,0] + gradrefb3 = lambda x: [0,1] + gradrefb = lambda x: (np.multiply(gradrefb1(x),refb2(x))*refb3(x)+refb1(x)*np.multiply(gradrefb2(x),refb3(x))+refb1(x)*np.multiply(gradrefb3(x),refb2(x)))*27 + + # transform the triangle to get general bubble function + unitK = [[0,0],[1,0],[0,1]] + [trafo,M] = tritrafo(K,unitK,pt) + + # chain rule + DM = np.array([[M[0,0],M[0,1]],[M[1,0],M[1,1]]]) # = M[0:1,0:1] + val = np.matmul(gradrefb(trafo),DM) # order of multiplikation important! 
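+ # chain rule (added remark): bubbleK = refb(trafo(.)) with affine trafo whose Jacobian is DM, hence
+ # grad bubbleK(pt) = DM^T * grad refb(trafo(pt)); the row-vector-times-matrix product above realizes exactly this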
+ + return val + +def LaplacebubbleK(K,pt) : +# input : triangle K, point x +# output : Laplace of element bubble function + + # get reference bubble functions on unit triangle + refb1 = lambda x: 1-x[0]-x[1] + refb2 = lambda x: x[0] + refb3 = lambda x: x[1] + gradrefb1 = lambda x: [-1,-1] + gradrefb2 = lambda x: [1,0] + gradrefb3 = lambda x: [0,1] + Hesse_refb = lambda x: [[27*(refb2(x)*gradrefb1(x)[0]*gradrefb3(x)[0]+refb3(x)*gradrefb1(x)[0]*gradrefb2(x)[0]+refb1(x)*gradrefb3(x)[0]*gradrefb2(x)[0]+refb2(x)*gradrefb3(x)[0]*gradrefb1(x)[0]+refb1(x)*gradrefb2(x)[0]*gradrefb3(x)[0]+refb3(x)*gradrefb2(x)[0]*gradrefb1(x)[0]) , 27*(refb2(x)*gradrefb1(x)[1]*gradrefb3(x)[0]+refb3(x)*gradrefb1(x)[1]*gradrefb2(x)[0]+refb1(x)*gradrefb3(x)[1]*gradrefb2(x)[0]+refb2(x)*gradrefb3(x)[1]*gradrefb1(x)[0]+refb1(x)*gradrefb2(x)[1]*gradrefb3(x)[0]+refb3(x)*gradrefb2(x)[1]*gradrefb1(x)[0])], + [27*(refb2(x)*gradrefb1(x)[0]*gradrefb3(x)[1]+refb3(x)*gradrefb1(x)[0]*gradrefb2(x)[1]+refb1(x)*gradrefb3(x)[0]*gradrefb2(x)[1]+refb2(x)*gradrefb3(x)[0]*gradrefb1(x)[1]+refb1(x)*gradrefb2(x)[0]*gradrefb3(x)[1]+refb3(x)*gradrefb2(x)[0]*gradrefb1(x)[1]) , 27*(refb2(x)*gradrefb1(x)[1]*gradrefb3(x)[1]+refb3(x)*gradrefb1(x)[1]*gradrefb2(x)[1]+refb1(x)*gradrefb3(x)[1]*gradrefb2(x)[1]+refb2(x)*gradrefb3(x)[1]*gradrefb1(x)[1]+refb1(x)*gradrefb2(x)[1]*gradrefb3(x)[1]+refb3(x)*gradrefb2(x)[1]*gradrefb1(x)[1])]] + + # transform the triangle to get general bubble function + unitK = [[0,0],[1,0],[0,1]] + [trafo,M] = tritrafo(K,unitK,pt) + + # double chain rule + DM1 = np.array([M[0,0],M[1,0]]) + DM2 = np.array([M[0,1],M[1,1]]) + val = np.dot(DM1,np.matmul(Hesse_refb(trafo),DM1)) + np.dot(DM2,np.matmul(Hesse_refb(trafo),DM2)) # due to Laplace chain rule + + return val + + +def bubbleE(K,index_notE,pt) : +# input : triangle K, indexKpt gives index of point of K that is not part of E (therefore uniquely defining E), point x +# output :edge bubble function bE + + # get reference bubble functions on unit triangle + refb1 = lambda x: 1-x[0]-x[1] + refb2 = lambda x: x[0] + refb3 = lambda x: x[1] + + if index_notE == 0 : # remove function that is associated to point not belonging to the edge + refb = lambda x: refb2(x)*refb3(x)*4 # eher 4 + elif index_notE == 1 : + refb = lambda x : refb1(x)*refb3(x)*4 + else : + refb = lambda x: refb1(x)*refb2(x)*4 + + # transform the triangle to get general bubble function + unitK = [[0,0],[1,0],[0,1]] + [trafo,M] = tritrafo(K,unitK,pt) + + return refb(trafo) + +def gradbubbleE(K,index_notE,pt) : + # input : triangle K, indexKpt gives index of point of K that is not part of E (therefore uniquely defining E), point x + # output : gradient of edge bubble function bE + + # get reference bubble functions on unit triangle + refb1 = lambda x: 1-x[0]-x[1] + refb2 = lambda x: x[0] + refb3 = lambda x: x[1] + gradrefb1 = lambda x: [-1,-1] + gradrefb2 = lambda x: [1,0] + gradrefb3 = lambda x: [0,1] + + if index_notE == 0 : # remove function that is associated to point not belonging to the edge + gradrefb = lambda x: (np.multiply(refb2(x),gradrefb3(x))+np.multiply(gradrefb2(x),refb3(x)))*4 + elif index_notE == 1 : + gradrefb = lambda x : (np.multiply(refb1(x),gradrefb3(x))+np.multiply(gradrefb1(x),refb3(x)))*4 + else : + gradrefb = lambda x: (np.multiply(refb1(x),gradrefb2(x))+np.multiply(gradrefb1(x),refb2(x)))*4 + + # transform the triangle to get general bubble function + unitK = [[0,0],[1,0],[0,1]] + [trafo,M] = tritrafo(K,unitK,pt) + + # chain rule + DM = [[M[0,0],M[0,1]],[M[1,0],M[1,1]]] # = M[0:1,0:1] + val = 
np.matmul(gradrefb(trafo),DM) # order of multiplikation important! + + return val + +def LaplacebubbleE(K,index_notE,pt) : + # input : triangle K, indexKpt gives index of point of K that is not part of E (therefore uniquely defining E), point x + # output : Laplace of edge bubble function bE + + # get reference bubble functions on unit triangle + gradrefb1 = lambda x: [-1,-1] + gradrefb2 = lambda x: [1,0] + gradrefb3 = lambda x: [0,1] + + if index_notE == 0 : # remove function that is associated to point not belonging to the edge + Hesse_refb = lambda x: [[4*(2*gradrefb2(x)[0]*gradrefb3(x)[0]) , 4*(gradrefb2(x)[0]*gradrefb3(x)[1]+gradrefb2(x)[1]*gradrefb3(x)[0])], + [4*(gradrefb2(x)[1]*gradrefb3(x)[0]+gradrefb2(x)[0]*gradrefb3(x)[1]) , 4*(2*gradrefb2(x)[1]*gradrefb3(x)[1])]] + elif index_notE == 1 : + Hesse_refb = lambda x: [[4*(2*gradrefb1(x)[0]*gradrefb3(x)[0]) , 4*(gradrefb1(x)[0]*gradrefb3(x)[1]+gradrefb1(x)[1]*gradrefb3(x)[0])], + [4*(gradrefb1(x)[1]*gradrefb3(x)[0]+gradrefb1(x)[0]*gradrefb3(x)[1]) , 4*(2*gradrefb1(x)[1]*gradrefb3(x)[1])]] + else : + Hesse_refb = lambda x: [[4*(2*gradrefb1(x)[0]*gradrefb2(x)[0]) , 4*(gradrefb1(x)[0]*gradrefb2(x)[1]+gradrefb1(x)[1]*gradrefb2(x)[0])], + [4*(gradrefb1(x)[1]*gradrefb2(x)[0]+gradrefb1(x)[0]*gradrefb2(x)[1]) , 4*(2*gradrefb1(x)[1]*gradrefb2(x)[1])]] + + # transform the triangle to get general bubble function + unitK = [[0,0],[1,0],[0,1]] + [trafo,M] = tritrafo(K,unitK,pt) + + # double chain rule + DM1 = np.array([M[0,0],M[1,0]]) + DM2 = np.array([M[0,1],M[1,1]]) + val = np.dot(DM1,np.matmul(Hesse_refb(trafo),DM1)) + np.dot(DM2,np.matmul(Hesse_refb(trafo),DM2)) # due to Laplace chain rule + return val + + +def get_timestep(v,K,tri) : + # input : v = grad_c, triangulation + # output: time step + + # motivated by positivity proof / CFL condition in Kolbe + # go through all CCs, find adjacent triangles in dual_mesh, calculate grad_c*n_E on each of these adjacent triangles + # not used in simulations + + upper_list = [] + + for i in range(len(K)) : + + neighbors = tri.neighbors[i] # get neighbors of triangle K[i] + + indices = [[1,2],[0,2],[0,1]] + val = 0 + + [integral,avrg,areaK] = avrgK(K[i],lambda x: np.dot(x,x)) + + for j in range(3): + + if neighbors[j] >= 0 : # i.e. edge E is interior + + E = K[i][indices[j]] + midE = 1/2*(E[0]+E[1]) + normalKE = unitouternormal(K[i],E) + + [integral,avrg,areaE] = avrgE(E,lambda x: np.dot(x,x)) + + val =+ np.abs(np.dot(v(midE),normalKE))*areaE + + else : # i.e. 
edge E is part of the boundary -> vKE = 0 for Neumann bdr condition + + val =+ 0 + + upper_list.append(areaK/val) + + + upper = np.min(upper_list) + + return upper + + + +# CHEMICAL DENSITY RELATED FUNCTIONS + +def hat(i,tri,points,x,oriented_edges,structure,edge_to_index,indexK) : +# 2D hat function on triangle +#input: index i of vertex in points, triangulation tri,K , point x, indexK: wenn -1 muss noch gesucht werden wenn > -1 dann index von triangle +#output: value of hat function at x + + # get triangle in which is x + if indexK < -0.5 : + global indexE + indexE = find_simplex1(x,oriented_edges,structure,indexE) + k = edge_to_index[indexE] + else : + k = indexK + + if i in tri.simplices[k] : + + # get adjacent points of i + L = [points[j] for j in tri.simplices[k] if not j==i ] + + M = np.zeros([3,3]) + rhs = np.zeros(3) + rhs[0] = 1 # nodal values for hat functions + + # get system of equations + M[0][0] = points[i][0] # a*x+b*y +c = y + M[0][1] = points[i][1] + M[0][2] = 1 + + M[1][0] = L[0][0] + M[1][1] = L[0][1] + M[1][2] = 1 + + M[2][0] = L[1][0] + M[2][1] = L[1][1] + M[2][2] = 1 + + coeff = np.linalg.solve(M,rhs) # solve linear system of equations + + y = coeff[0]*x[0]+coeff[1]*x[1]+coeff[2] + + else : + y = 0 # zero if outside of stencil + + return y + +def dxhat(i,tri,points,x,oriented_edges,structure,edge_to_index,indexK) : +# gradient 2d hat function on triangle +#input: index i of vertex in points, triangulation tri,K , point x, indexK, wenn -1 muss noch gesucht werden wenn > -1 dann index von triangle +#output: value of gradient of hat function at x + + if indexK < -0.5 : + global indexE + indexE = find_simplex1(x,oriented_edges,structure,indexE) + k = edge_to_index[indexE] + else : + k = indexK + + if i in tri.simplices[k] : + + # get adjacent points of i + L = [points[j] for j in tri.simplices[k] if not j==i] + + + M = np.zeros([3,3]) + rhs = np.zeros(3) + rhs[0] = 1 # nodal values for hat functions + + # get system of equations + M[0][0] = points[i][0] # a*x+b*y +c = y + M[0][1] = points[i][1] + M[0][2] = 1 + + M[1][0] = L[0][0] + M[1][1] = L[0][1] + M[1][2] = 1 + + M[2][0] = L[1][0] + M[2][1] = L[1][1] + M[2][2] = 1 + + coeff = np.linalg.solve(M,rhs) # solve linear system of equations + + grad = np.array([coeff[0],coeff[1]]) # gradient + + else : + grad = np.zeros(2) # zero if x is outside of stencil + + return grad + + +def assemble_FE_matrix1(tri_dual,K_dual,points_dual): +# see Fig. 3.14. 
in Bartels: Numerical Approximation of PDEs, 2015 +# input : triangulation +# output : (affine linear) FE matrix + + # initialize objects + iter = 0 + iter_max = 9*len(K_dual) + + I = np.zeros(iter_max) + J = np.zeros(iter_max) + X_diff = np.zeros(iter_max) + X_reac = np.zeros(iter_max) + + m_loc = 1/12*(np.ones([3,3]) + np.array([[1,0,0],[0,1,0],[0,0,1]])) # formular from Bartels book + + for i in range(len(K_dual)) : + + X_K = np.array([[1, 1, 1],[K_dual[i][0][0], K_dual[i][1][0], K_dual[i][2][0]],[K_dual[i][0][1], K_dual[i][1][1], K_dual[i][2][1]]]) + rhs = np.array([[0,0],[1,0],[0,1]]) + grads_K = np.linalg.solve(X_K,rhs) # compute gradients + vol_K = np.linalg.det(X_K)/2 # compute areaK + + # get matrix + for m in [0,1,2] : + for n in [0,1,2] : + I[iter] = tri_dual.simplices[i][m] + J[iter] = tri_dual.simplices[i][n] + X_diff[iter] = vol_K * np.dot(grads_K[m],grads_K[n]) # diffusion contributions (stiffness matrix) + X_reac[iter] = vol_K*m_loc[m][n] # reaction conrtibutions (mass matrix) + iter += 1 + + sparseA_diff = csr_matrix((X_diff[:iter], (I[:iter], J[:iter])), shape=(len(points_dual),len(points_dual))) + + sparseA_reac = csr_matrix((X_reac[:iter], (I[:iter], J[:iter])), shape=(len(points_dual),len(points_dual))) + + sparseA = sparseA_diff + sparseA_reac + + return sparseA + +def getc_FE1(interpol_rho,g,tri_dual,K_dual,points_dual,sparseA) : +# input : interpolation of FV solution rho, rhs g for manufactured solutions, dual triangulation (tri,K,points) and the FE matrix +# output: coefficients FE solution c + + b = np.zeros(len(points_dual)) + + aux = lambda x : interpol_rho(x)+g(x) + + for i in range(len(K_dual)) : + + # get objects from Bartels book + X_K = np.array([[1, 1, 1],[K_dual[i][0][0], K_dual[i][1][0], K_dual[i][2][0]],[K_dual[i][0][1], K_dual[i][1][1], K_dual[i][2][1]]]) + vol_K = np.linalg.det(X_K)/2 + mid_K = 1/3*(K_dual[i][0]+K_dual[i][1]+K_dual[i][2]) # midpoint of triangle K_dual + + for m in [0,1,2] : + b[tri_dual.simplices[i][m]] += 1/3* vol_K * aux(mid_K) # using midpoint rule + + c = spsolve(sparseA, b, permc_spec=None, use_umfpack=True) # solve system of equations + + # Then FE solution is c_h^n(x) = sum_i (c_i*hat_i(x)) + return c + + +def cfunc(c,tri,K,points,x,oriented_edges,structure,edge_to_index) : +# input: coefficients FE sol., point x, dual triangulation (tri,K,points), objects to make find_simplex for dual meshes more efficient +# output: function value of FE solution: c_h^n(x) = sum_i (c_i*hat_i(x)) + + val = 0 + global indexE + indexE = find_simplex1(x,oriented_edges,structure,indexE) + k = edge_to_index[indexE] # get index of triangle in which x lives + + for i in range(len(c)) : # sum over all coefficients c (stencil would be enough) + val += c[i]*hat(i,tri,points,x,oriented_edges,structure,edge_to_index,indexK = k) + + return val + +def grad_cfunc(c,tri,K,points,x,oriented_edges,structure,edge_to_index) : +# input: coefficients FE sol., point x, dual triangulation (tri,K,points), objects to make find_simplex for dual meshes more efficient +# output: gradient of FE solution: c_h^n(x) = sum_i (c_i*hat_i(x)) + + val = 0 + global indexE + indexE = find_simplex1(x,oriented_edges,structure,indexE) + k = edge_to_index[indexE] + + for i in range(len(c)) : # sum over all coefficients c (stencil would be enough) + val += c[i]*dxhat(i,tri,points,x,oriented_edges,structure,edge_to_index,indexK = k) + return val + + +def get_c_plot(pltx,tri,K,points,n,ht,cc,clim,fineness,interpol,oriented_edges,structure,edge_to_index) : +# input: plot resolution pltx, 
triangulation, time step n and step size ht, FE sol. cc, fineness, interpolation type and objects to make find_simplex for the dual mesh more effient +# output: plot of FE solution function c + + # discretize space into pixels + x = np.linspace(0, 1, pltx) + y = np.linspace(0, 1, pltx) + X, Y = np.meshgrid(x, y) + Z = np.zeros([pltx,pltx]) + + for i in range(pltx): + for j in range(pltx): + Z[j][i] = cfunc(cc,tri,K,points,[x[i],y[j]],oriented_edges,structure,edge_to_index) # compute value of pixel + # Z[j][i] = grad_cfunc(cc,tri,K,points,[x[i],y[j]])[0] + + # plot values Z + fig = plt.figure() + plt.pcolormesh(X,Y,Z,cmap = 'jet',vmin = clim[0],vmax = clim[1]) + plt.xlabel('x1') + plt.ylabel('x2') + plt.colorbar() + time = n*ht + title = 'test1_fineness'+str(fineness)+'_'+interpol+'_c at time step'+str(n) + plt.title(title) + image_title = title+'.png' + plt.savefig(image_title, bbox_inches='tight') + plt.close() + # plt.show() + + +def get_linv(K_dual,nppoints_dual,numtri_dual,tri_dual,v,nodal_avrg_choice) : +# input: triangulation (K,points,numtri etc), v=grad_c, nodal choice for linear interpolation +# output: approximation of gradient of v + + grad_cx = [] + grad_cy = [] + for i in range(len(K_dual)) : + mid = 1/3*(K_dual[i][0]+K_dual[i][1]+K_dual[i][2]) # get point inside K_dual[i] + + # v is constant on K_dual, so just evalute in the middle of the triangle + grad_cx.append(v(mid)[0]) + grad_cy.append(v(mid)[1]) + + grad_cx = np.array(grad_cx) + grad_cy = np.array(grad_cy) + + # linearly interpolate componentwise subject to nodal_avrg_choice + lin_vx = getq0(K_dual,nppoints_dual,numtri_dual,tri_dual,grad_cx,nodal_avrg_choice) + lin_vy = getq0(K_dual,nppoints_dual,numtri_dual,tri_dual,grad_cy,nodal_avrg_choice) + + return [lin_vx,lin_vy] + +def eval_linv(lin_vx,lin_vy,x,oriented_edges,structure,edge_to_index) : +# input : linearly inteprolated grad_c, point x, and objects to make find_simplex for dual mesh more efficient +# output: point evaluation of linear interpolation of piecewise constant v = grad_c + + global indexE + indexE = find_simplex1(x,oriented_edges,structure,indexE) + i = edge_to_index[indexE] # get index of triangle in which x lives + + val = [lin_vx[3*i]*x[0]+lin_vx[3*i+1]*x[1]+lin_vx[3*i+2], lin_vy[3*i]*x[0]+lin_vy[3*i+1]*x[1]+lin_vy[3*i+2]] # compute val=a*x+b*y+c componentwise + + return val + + +def approx_Laplace_cn(lin_vx,lin_vy,x,oriented_edges,structure,edge_to_index) : +# input: normal vector, linear interpolation to piecewise constant v=grad_c, objects that make find_simplex for dual mesh more efficient +# output: approximation to Laplace(c) + + global indexE + indexE = find_simplex1(x,oriented_edges,structure,indexE) + i = edge_to_index[indexE] + + Laplace = lin_vx[3*i]+lin_vy[3*i+1] # get divergence of linear interpolation of piecewise constant v=grad_c -> approximation to Laplace(c) + + return Laplace + + + +# BACTERIAL DENSITY RELATED FUNCTIONS + +def getvKE(K,E,v) : +# need this for functions finitevolumescheme_rho(...) +# input: triangle K, edge E, function v +# output: coefficient vKE from nicaise-paper, area of edge E + + normalKE = unitouternormal(K,E) + + aux = lambda x : np.dot(v(x),normalKE) + + [integral,vKE,areaE] = avrgE(E,aux) # vKE = avrg over edge E + + return [vKE,areaE] + +def getdistE(K1,K2) : +# need this for function finitevolumescheme_rho(...) 
+# input : neigboring triangles K1 and K2 +# output : distance of their circumcenters + + CC1 = circumcenter(K1) + CC2 = circumcenter(K2) + val = np.linalg.norm(CC1-CC2, ord = None) # distance + + return val + +def getrho(cc_old,f,ht,v,tri,K,numtri): +# input: FE sol. c from last time step as initial value for newton method, rhs f for manufactured solutions, temporal step size ht, v=grad_c, triangulation +# output: FV solution rho + + eps = 1 # parameter epsilon = 1 in Keller-Segel system + + rho = finitevolumescheme_rho(cc_old,ht,numtri,tri,K,eps,v,f) + + return rho + +def finitevolumescheme_rho(u_old,ht,numtri,tri,K,eps,v,f) : +# finite volume scheme to get rho with zero Neumann bdr via solving non-linear system of equations +# input: initial value for newton method u_old, temporal step size ht, triangulation, parameter eps = 1, v=grad_c, rhs f for manufactured solutions +# output: FV solution rho + + # initialize global objects + global global_u_old, global_ht + global_u_old = u_old + global_ht = ht + + # get non-linear system of equation to calculate num. sol. uh via FV scheme + def func(u) : + + sum_EK = np.zeros(len(K)) + for i in range(len(K)) : # go through triangles + + [fK,avrg,areaK] = avrgK(K[i],f) # get RHS fK subject to manufactured solutions + + neighbors = tri.neighbors[i] # get neighbors of triangle i + + indices = [[1,2],[0,2],[0,1]] # possible combinations of vertices to compose an edge, where j = 0,1,2 is not contained , order is impotant + + for j in range(3) : # go through edges + + E = K[i][indices[j]] # choose edge that is shared by K[i] and K[neighbors[j]] + + if neighbors[j] >= 0 : # i.e. edge E is interior + + [vKE,areaE] = getvKE(K[i],E,v) # avrg over v*nKE ( = 0 if E on boundary due to neumann boundary w.r.t. c ) + dE = getdistE(K[i],K[neighbors[j]]) # get distance between circumcenters of triangles + + if np.abs(u[i]-u[neighbors[j]]) < 10**-6 or u[i] < 10**-6 or u[neighbors[j]] < 10**-6 : # in this case centered flux to avoid divide by 0 or log(0) + sum_EK[i] = sum_EK[i] - eps*areaE/dE*(u[neighbors[j]]-u[i]) + vKE*areaE*1/2*(u[i]+u[neighbors[j]]) + else : + sum_EK[i] = sum_EK[i] - eps*areaE/dE*(u[neighbors[j]]-u[i]) + vKE*areaE*((u[i]-u[neighbors[j]])/(np.log(u[i])-np.log(u[neighbors[j]]))) + + else : # i.e. edge E is part of the boundary -> vKE = 0 for Neumann bdr condition + + sum_EK[i] = sum_EK[i] - eps*(0 + 0) #+ 0 (D_K,E) + 0 (vKE) + + sum_EK[i] = 1/areaK*sum_EK[i] - 1/areaK*fK # finite volume scheme + + return (u - global_u_old)/global_ht + sum_EK # difference quotient + finite volume scheme + + # get Jacobian to solve non-linear system of equations for the newton method + def grad_func(u) : + + #sum_EK = np.zeros(len(K)) + row = [] + col = [] + data = [] + for i in range(len(K)) : + + [fK,avrg,areaK] = avrgK(K[i],f) # need areaK for system of equations + neighbors = tri.neighbors[i] # get neighbors of triangle i + + indices = [[1,2],[0,2],[0,1]] # possible combinations of vertices to compose an edge, where j = 0,1,2 is not contained , order is impotant + + valK = 0 + for j in range(3) : # go through edges + valL = 0 + + E = K[i][indices[j]] # choose edge that is shared by K[i] and K[neighbors[j]] + + if neighbors[j] >= 0 : # i.e. 
edge E is interior + + [vKE,areaE] = getvKE(K[i],E,v) # avrg over v*nKE + dE = getdistE(K[i],K[neighbors[j]]) # get distance between circumcenters of triangles + + if np.abs(u[i]-u[neighbors[j]]) < 10**-6 or u[i] < 10**-6 or u[neighbors[j]] < 10**-6 : # in this case centered flux to avoid divide by 0 or log(0) + valK = valK + eps*(areaE/dE) + vKE*areaE*1/2 + valL = valL - eps*(areaE/dE) + vKE*areaE*1/2 + else : + valK = valK + eps*(areaE/dE) + vKE*areaE*((np.log(u[i])-np.log(u[neighbors[j]])-1+u[neighbors[j]]/u[i])/((np.log(u[i])-np.log(u[neighbors[j]]))**2)) + valL = valL - eps*(areaE/dE) + vKE*areaE*((-np.log(u[i])+np.log(u[neighbors[j]])-1+u[i]/u[neighbors[j]])/((np.log(u[i])-np.log(u[neighbors[j]]))**2)) + + # get sparse Jacobian + row.append(i) + col.append(neighbors[j]) + data.append(1/areaK*valL) + + + else : # i.e. edge E is part of the boundary -> vKE = 0 for Neumann bdr condition + + valK = valK + 0 + valL = valL + 0 + + + #diagonal entries + row.append(i) + col.append(i) + data.append(1/global_ht+1/areaK*valK) + + sparseJ = csr_matrix((data, (row, col)), shape = (numtri, numtri))#.toarray() + + return sparseJ # return jacobian in sparse form + + # APPLY NEWTON METHOD TO FIND ROOT U OF FUNC SUBJECT OT ITS JACOBIAN GRAD_U + uh = newton_method(func,grad_func, x0 = u_old ,tol = 1.5*10**-8, maxiter = 20) # tol like in fsolve + + return uh + + +def get_rho_plot(pltx,tri,n,ht,rho,rholim,fineness): +# plot the finite volume solution + +# input: resolution of plot pltx, triangulation, time step n, step size ht, FV solution rho, plot limits rholim, fineness +# output: plot of piecewise constatn FV sol. rho + + # initialize space discretization as pixels + x = np.linspace(0, 1, num = pltx) + y = np.linspace(0, 1, num = pltx) + X,Y = np.meshgrid(x,y) + Z = np.zeros([pltx,pltx]) + + for i in range(pltx): + for j in range(pltx): + pt = [x[i],y[j]] + k = tri.find_simplex(pt) # get triangle in which pt lies + Z[j][i] = rho[k] # get function value + + #colormap plot + fig = plt.figure() + plt.pcolormesh(X,Y,Z,cmap = 'jet',vmin = rholim[0],vmax = rholim[1]) # limits of plot + plt.xlabel('x1') + plt.ylabel('x2') + plt.colorbar() + + # # 3D-PLOT: + # X, Y = np.meshgrid(x, y) + # fig = plt.figure() + # ax = plt.axes(projection='3d') + # plt.xlabel('x1') + # plt.ylabel('x2') + # ax.axes.set_xlim3d(left=0, right=1) + # ax.axes.set_ylim3d(bottom=0, top=1) + # ax.axes.set_zlim3d(bottom=rholim[0], top=rholim[1]) + # p = ax.plot_surface(X, Y, Z, rstride=1, cstride=1, cmap='jet', edgecolor='none',vmin = rholim[0], vmax = rholim[1]) + # fig.colorbar(p, ax=ax) + + time = n*ht + title = 'fineness'+str(fineness)+'_FV-solution_ bacterial density rho at time '+"%f" % time + # title_theta = 'theta_K at time t='+"%f" % time # need this for plot_aposti_estimator + title_png = 'FV solution rho_h at time t='+"%f" % time + plt.title(title_png) + image_title = title+'.png' + plt.savefig(image_title, bbox_inches='tight') + plt.close() + # plt.show() + + + +# INTERPOLATION/RECONSTRUCTION + +def getinterpolationRHS(tri,K,uh) : +# reconstruct the diffusive flux of the FV scheme D_K,E = FKED (nicaise-paper notation) + +# input : triangulation related values tri,K and numercial solution uh +# output : RHS for the interpolation consisting of diffusive numerical flux D_K,E = FKED (nicaise-paper notation) + + FKED = [] # nicaise paper notation + + for i in range(len(K)) : + neighbors = tri.neighbors[i] # get neighbors of triangle subject to index i + + indices = [[1,2],[0,2],[0,1]] # possible combinations of vertices to compose 
an edge, where j = 0,1,2 is not contained , order is impotant + + auxFKED = [] + for j in range(3) : # go through neighboring triangles K[neighbors[j]] / go through edges of K[i] + + E = K[i][indices[j]] # choose edge that is shared by K[i] and K[neighbors[j]] + + [integral,avrg,areaE] = avrgE(E,lambda x: x) # need areaE + + if neighbors[j] >= 0 : # i.e. edge E is interior + + dE = getdistE(K[i],K[neighbors[j]]) # get distance between circumcenters of these two triangles + + auxFKED.append(areaE/dE*(uh[neighbors[j]]-uh[i])) + + else : # i.e. edge E is part of the boundary + auxFKED.append(0) # grad_rho*normal = 0 due to zero neumann bdr + + FKED.append(auxFKED) + + return FKED + + +def getq0(K,nppoints,numtri,tri,uh,string: str) : +# get linear interpolation part of the morley inteprolation q0 + +# input : triangles K, number of triangles numtri, numerical solution uh, string on how to compute nodal values +# output : parameters qq0 of affine linear functions q0 in form [a1,b1,c1,a2,b2,c2,...,aN,bN,cN] with N = len(K) + + # initilize objects + rhs = [] + row = [] + col = [] + data = [] + + for i in range(len(K)) : # go through all triangles + + for j in range(3): # go through all vertices + + pindex = tri.simplices[i][j] # get index of vertex + + neighborstri = find_neighbors(pindex, tri) # indices of triangles that touch vertex associated with pindex + + #calculate vertex_val + if string == 'least-squares' : # weighted mean with inner angles as weights -> see Batina, Rausch, Yang - Paper + + Rx, Ry, Ixx, Iyy, Ixy = 0, 0, 0, 0, 0 + x0 = nppoints[pindex] + for k in neighborstri : # go through all touching triangles + + CC = circumcenter(K[k]) # calculate the cell center + + Rx += (CC[0]-x0[0]) + Ry += (CC[1]-x0[1]) + Ixx += (CC[0]-x0[0])**2 + Iyy += (CC[1]-x0[1])**2 + Ixy += (CC[0]-x0[0])*(CC[1]-x0[1]) + + lambdax = (Ixy*Ry-Iyy*Rx)/(Ixx*Iyy-Ixy**2) # compute Lagrange multipliers + lambday = (Ixy*Rx-Ixx*Ry)/(Ixx*Iyy-Ixy**2) + + wi = [] + qi = [] + for k in neighborstri : # go through all touching triangles + CC = circumcenter(K[k]) + wi.append(1+lambdax*(CC[0]-x0[0])+lambday*(CC[1]-x0[1])) # get weights + qi.append(uh[k]) + + wi = np.array(wi) + qi = np.array(qi) + + if np.sum(wi) == 0 : + vertex_val = np.sum(qi)/len(qi) + else : + vertex_val = np.dot(wi,qi)/np.sum(wi) + + + elif string == 'arithmetic-mean' : # calssic, arithmetic mean + + vertex_val = 0 + for k in neighborstri : # go through neighbors + vertex_val += uh[k] + vertex_val = vertex_val/len(neighborstri) # arithmetic mean + + else : + print('Error: invalid string input') + + rhs.append(vertex_val) # assign mean value over all triangles touching vertex + + # get system of equaton via a*x+b*y+c, in sparse form + row.append(3*i+j) # for a + col.append(3*i+0) + data.append(K[i][j][0]) # triangle i, vertex j, coordinate 0 i.e. x + row.append(3*i+j) # for b + col.append(3*i+1) + data.append(K[i][j][1]) # triangle i, vertex j, coordinate 1 i.e. 
y + row.append(3*i+j) # for c + col.append(3*i+2) + data.append(1) + + sparseQ0 = csr_matrix((data, (row, col)), shape = (3*numtri, 3*numtri)) + qq0 = spsolve(sparseQ0, rhs, permc_spec=None, use_umfpack=True) # solve system of equations + + return qq0 # coefficients piecewise linear interpolation + + +def getbetaE(K,qq0,FKED) : +# get morley coefficients betaKE + +# input : all tirangles K, coefficients qq0 of q0, diffusive numerical flux DKE = FKED (nicaise-paper notation) +# output : values betaKE of form [[three-dim-array of edges of K1],[three-dim-array of edges of K2],....,[three-dim-array of edges of KN]] where N = len(K) + + betaKE = [] # initilize + + for i in range(len(K)) : # go through all triangles + + gradq0 = lambda x : [qq0[3*i],qq0[3*i+1]] # = [a,b] if q0|K = a*x+b*y+c + + indices = [[1,2],[0,2],[0,1]] # possible combinations of vertices to compose an edge, where j = 0,1,2 is not contained , order is impotant!! + betaE = [] + + for j in range(3) : # go through neighboring triangles K[neighbors[j]] i.e. go through edges of K[i] + E = K[i][indices[j]] # choose edge that is shared by K[i] and K[neighbors[j]] + normalKE = unitouternormal(K[i],E) # get normal + + # compute betaKE via closed form by plugging Morley inteprolant into degree of freedom associated with betaKE + aux = lambda x : np.dot(gradq0(x),normalKE) # get integral over gradq0*normal i.e. normal derivative of q0 + [integral,avrg, areaE] = avrgE(E,aux) + I = integral + + aux = lambda x : bubbleE(K[i],j,x)*np.dot(gradbubbleK(K[i],x),normalKE) # get integral bubbleE*gradbubbleK*normal + [integral,avrg, areaE] = avrgE(E,aux) + II = integral + + aux_beta = (FKED[i][j]-I)/(II) # closed form for betaKE + + betaE.append(aux_beta) + + betaKE.append(betaE) + + return betaKE + + +def morley_interpol(K,tri,qq0,betaKE,x,indexK) : +# input : triangulation K,tri / coefficients qq0, betaKE / point x and index of the triangles in which is lives if known +# output : function value of gradient of morley interpolant at point x + + if indexK >= -0.5 : # give possibility to indicate triangle on which we work + i = indexK + else : + i = tri.find_simplex(x) # get triangle K[i] in which x is contained # get triangle K[i] in which x is contained + + q0 = qq0[3*i]*x[0]+qq0[3*i+1]*x[1]+qq0[3*i+2] # get linear q0 on triangle K[i] + + aux = 0 + + for j in range(3) : # sum over all edges of K[i] + + aux += betaKE[i][j]*bubbleE(K[i],j,x)*bubbleK(K[i],x) # betaKE contributions to morley + + return q0+aux + + +def grad_morley_interpol(K,tri,qq0,betaKE,x,indexK) : +# input : triangulation K,tri / coefficients qq0, betaKE / point x and index of the triangles in which is lives if known +# output : function value of gradient of morley interpolant at point x + + if indexK >=- 0.5 : # give possibility to indicate triangle on which we work + i = indexK + else : + i = tri.find_simplex(x) # get triangle K[i] in which x is contained + + II = 0 + for j in range(3) : # go through edges + + II += betaKE[i][j]*(gradbubbleE(K[i],j,x)*bubbleK(K[i],x)+gradbubbleK(K[i],x)*bubbleE(K[i],j,x)) # get beta contirbutions + + return np.array([qq0[3*i],qq0[3*i+1]])+II # piecewise grad_morley = grad_q0 + beta*(...) = [a,b]+beta*(...) 
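+
+# Illustrative usage sketch (comments only, not executed): how the Morley-type
+# reconstruction above is assembled from a piecewise constant FV solution on the
+# primal mesh (tri, K, nppoints, numtri); this mirrors get_morley0_coeff further below.
+# Here uh is a placeholder for any FV solution vector, e.g. rho at one time step,
+# and pt is an arbitrary evaluation point in the unit square.
+#
+#   qq0    = getq0(K, nppoints, numtri, tri, uh, 'least-squares')   # piecewise linear part q0
+#   FKED   = getinterpolationRHS(tri, K, uh)                        # diffusive numerical fluxes D_K,E
+#   betaKE = getbetaE(K, qq0, FKED)                                 # edge-bubble coefficients beta_K,E
+#   pt     = [0.3, 0.4]
+#   val    = morley_interpol(K, tri, qq0, betaKE, pt, indexK=-1)      # value of the reconstruction at pt
+#   grad   = grad_morley_interpol(K, tri, qq0, betaKE, pt, indexK=-1) # its piecewise gradient at pt
+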
+ + +def Laplace_morley(K,tri,qq0,betaKE,x,indexK) : +# input : triangulation K,tri / coefficients qq0, betaKE / point x and index of the triangles in which is lives if known +# output : fucntion value of the Laplacian of morley interpolant at point x + + if indexK >=- 0.5 : # give possibility to indicate triangle on which we work + i = indexK + else : + i = tri.find_simplex(x) # get triangle K[i] in which x is contained + + II = 0 + for j in range(3) : # go through edges + + aux = lambda x : [qq0[3*i],qq0[3*i+1]] # grad_q0 + div_grad_q0 = lambda x : get_second_derivative(aux,K,tri,x,index=i) # divergence of the component-wise linear interpolation of the piecewise constant gradient grad_q0 + II += betaKE[i][j]*(LaplacebubbleE(K[i],j,x)*bubbleK(K[i],x)+LaplacebubbleK(K[i],x)*bubbleE(K[i],j,x)+2*np.dot(gradbubbleE(K[i],j,x),gradbubbleK(K[i],x))) # product rule on beta contributions + + return div_grad_q0(x)+II # piecewise Laplace_morley = "Laplace" q0 + alpha*(...)+beta*(...) + + +def get_second_derivative(grad_q0,K,tri,x,index) : +# need this for Laplacian of Morley, which is in turn needed to compute element-wise reisduals for the a posteriori error estimates +# input : piecewise constant grad_q0 / triangulation K,tri / point x and the index of the triangle in which it lives if known +# output: divergence of linear interpolation of piecewise constant grad_q0 + + if index > -0.5 : # if primal index + i = index + else : + i = tri.find_simplex(x) + + bx = [] + by = [] + for j in range(3) : + pindex = tri.simplices[i][j] # get index of vertex + + neighborstri = find_neighbors(pindex, tri) # indices of triangles that touch vertex associated with pindex + + # get nodal values + vertex_valx = 0 + vertex_valy = 0 + for k in neighborstri : # go thorugh neighboring points + midk = 1/3*(K[k][0]+K[k][1]+K[k][2]) # middle of triangle, as function is piecewise constant it doesent matter where we evaluate + vertex_valx += grad_q0(midk)[0] # x-direction + vertex_valy += grad_q0(midk)[1] # y-direction + vertex_valx = vertex_valx/len(neighborstri) # arithmetic mean + vertex_valy = vertex_valy/len(neighborstri) # arithmetic mean + bx.append(vertex_valx) + by.append(vertex_valy) + + # get linear system of equation to solve for linear inteprolation a*x+b*y+c = val + M = [[K[i][0][0],K[i][0][1],1],[K[i][1][0],K[i][1][1],1],[K[i][2][0],K[i][2][1],1]] + + lin_vx = np.linalg.solve(M,bx) + lin_vy = np.linalg.solve(M,by) + + div = lin_vx[0]+lin_vy[1] # get divergence of linear inteprolation + + return np.array(div) + + +def getmorleyplot(n,ht,K,tri,qq0,betaKE,h,rholim,fineness) : +# input : time step n, step size ht / triangulation K,tri,points / morley coefficients qq0, betaKE / plot mesh size h, plot limits rholim, fineness +# output : *shows/saves plot* + + def exact_rho(x,t) : + val = 1/(1+t)*np.exp(-25*(x[0]-1/2)**2-25*(x[1]-1/2)**2) + return val + + x = np.linspace(0, 1, int(1/h)) + y = np.linspace(0, 1, int(1/h)) + X, Y = np.meshgrid(x, y) + + Z = np.zeros([len(X),len(Y)]) + for i in range(len(X)) : + for j in range(len(Y)) : + pt = [x[i],y[j]] + Z[j][i] = morley_interpol(K,tri,qq0,betaKE,pt,indexK=-1) # plot morley interpolant + #Z[j][i] = exact_rho(pt,n*ht) # plot exact solution + #Z[j][i] = grad_morley_interpol(K,tri,v,grad_vn,qq0,betaKE,pt,indexK=-1)[1] # plot gradient of morley in one direction + + # #colormap plot + # fig = plt.figure() + # plt.pcolormesh(Z,cmap = 'jet',vmin = rholim[0],vmax = rholim[1]) # limits of plot + # plt.xlabel('x1') + # plt.ylabel('x2') + # plt.colorbar() + + # 3D-PLOT: + fig = 
plt.figure() + ax = plt.axes(projection='3d') + plt.xlabel('x1') + plt.ylabel('x2') + ax.axes.set_xlim3d(left=0, right=1) + ax.axes.set_ylim3d(bottom=0, top=1) + ax.axes.set_zlim3d(bottom=rholim[0], top=rholim[1]) + p = ax.plot_surface(X, Y, Z, rstride=1, cstride=1, cmap='jet', edgecolor='none',vmin = rholim[0], vmax = rholim[1]) + fig.colorbar(p, ax=ax) + + time = n*ht + title = 'fineness'+str(fineness)+'_morley interpolation _ bacterial density rho at time '+"%f" % time + title_png = 'Morley interpolation at time '+"%f" % time + # title = 'Exact rho at time '+"%f" % time + plt.title(title_png) + image_title = title+'.png' + plt.savefig(image_title, bbox_inches='tight') + plt.close() + #plt.show() + + + +# A POSTERIORI ERROR ESTIMATES + +def diam(K) : +# get approximately the diameter of triangle K, need this for various residual estimates + +# input: triangle K +# output: length of the longest edge, which differs the actual diameter only by a constant + + indices = [[1,2],[0,2],[0,1]] + val_max = 0 + for j in range(3) : # go through edges + E = K[indices[j]] + val = np.linalg.norm(E[1]-E[0]) # get length of edge + if val > val_max : # update val_max if val is larger + val_max = val + + return val_max + + +def get_theta_res(cc_m1,Laplace_v_m1,qq0_0,qq0_p1,betaKE_0,betaKE_p1,ht_0,K,tri,K_dual,tri_dual,points_dual,oriented_edges,structure,edge_to_index) : +# get local residual for each triangle K +# input: c^n-1, Laplace(c^n-1), morley^n, morley^n+1, time step size ht_0, primal and dual triangulation, objects to make find_simplex for dual mesh more efficient +# output: the local residual part of the a posteriori error estimator + + res_0 = [] + for i in range(len(K)) : + + morley_0 = lambda x: morley_interpol(K,tri,qq0_0,betaKE_0,x,indexK=i) # get morley^n + morley_p1 = lambda x: morley_interpol(K,tri,qq0_p1,betaKE_p1,x,indexK=i) # get morley^n+1 + + grad_morley_0 = lambda x: grad_morley_interpol(K,tri,qq0_0,betaKE_0,x,indexK=i) # get grad_morley^n + + Laplace_morley_0 = lambda x: Laplace_morley(K,tri,qq0_0,betaKE_0,x,indexK=i) # get Laplace_morley^n + + grad_c_m1 = lambda x : grad_cfunc(cc_m1,tri_dual,K_dual,points_dual,x,oriented_edges,structure,edge_to_index) #get grad_c^n-1 + + # GET L2 NORM OF LOCAL RESIDUAL + aux_0 = lambda x : ((morley_p1(x)-morley_0(x))/ht_0 + np.dot(grad_morley_0(x),grad_c_m1(x)) + morley_0(x)*Laplace_v_m1(x) - Laplace_morley_0(x))**2 + [integral_0,avrg,areaK] = avrgK(K[i],aux_0) + + hK = diam(K[i]) # get diameter of K + + res_0.append(integral_0*hK**2) # full term + + return res_0 + + +def get_theta_diff(qq0_0,betaKE_0,K,tri) : +# get grad_morley jump terms for each triangle K + +# input: morley^n and primal triangulation +# output: grad_morley jump terms from the a posteriori error estimator + + diff0 = [] + for i in range(len(K)) : # go through triangles + + indices = [[1,2],[0,2],[0,1]] + + grad_morley_0 = lambda x: grad_morley_interpol(K,tri,qq0_0,betaKE_0,x,indexK=i) # get grad_morley^n + + neighbors = tri.neighbors[i] # get neighbors of triangle associated with index i + indices = [[1,2],[0,2],[0,1]] # possible combinations of vertices to compose an edge, where j = 0,1,2 is not contained , order is impotant + + val0 = 0 + for j in range(3) : # go through edges + + E = K[i][indices[j]] # choose edge that is shared by K[i] and K[neighbors[j]] + + if neighbors[j] >= 0 : # i.e. 
edge E is interior + + grad_morley_0k = lambda x: grad_morley_interpol(K,tri,qq0_0,betaKE_0,x,indexK=neighbors[j]) # get grad_morley subject to neighboring triangle + + normalE = unitouternormal(K[i],E) # get normal vector + + aux0 = lambda x : np.dot(grad_morley_0k(x)-grad_morley_0(x),normalE)**2 # edge L2 norm of the jump + [integral0,avrg,areaE] = avrgE(E,aux0) + + val0 += integral0*areaE # sum over edges + + else : # i.e. edge E is part of the boundary -> vKE = 0 for Neumann bdr condition + + val0 += 0 # due to homogenous Neumann boundary condition + + diff0.append(val0) + + return diff0 + + +def get_theta_time(qq0_0,qq0_p1,betaKE_0,betaKE_p1,rho_m1,rho_0,rho_p1,ht_m1,ht_0,K,tri,n) : +# get time terms for each triangle K + +# input: morley^n, morley^n+1, FV sol rho^n, FV sol rho^n-1, FV sol rho^n+1, step sizes ht^n-1 and ht^n, primal triangulation, time step n +# output: time contributions to the a posteriori error estimator + + if n == 0 : # slightly different situation at early times + + time0 = [] + time1 = [] + for i in range(len(K)) : + morley_0 = lambda x: morley_interpol(K,tri,qq0_0,betaKE_0,x,indexK=i) # get morley^n + morley_p1 = lambda x: morley_interpol(K,tri,qq0_p1,betaKE_p1,x,indexK=i) # get morley^n+1 + + aux = lambda x : ((morley_p1(x)-morley_0(x))/ht_0 - (rho_p1[i]-rho_0[i])/ht_0)**2 # L2 norm to get the first time term + [integral,avrg,areaK] = avrgK(K[i],aux) + + time0.append(integral) # first time term + time1.append(0) # no second time term at early times + + + else : + time0 = [] + time1 = [] + for i in range(len(K)) : + morley_0 = lambda x: morley_interpol(K,tri,qq0_0,betaKE_0,x,indexK=i) # get morley^n + morley_p1 = lambda x: morley_interpol(K,tri,qq0_p1,betaKE_p1,x,indexK=i) # get morley^n+1 + + aux = lambda x : ((morley_p1(x)-morley_0(x))/ht_0 - (rho_p1[i]-rho_0[i])/ht_0)**2 # L2 norm to get the first time term + [integral,avrg,areaK] = avrgK(K[i],aux) + + val = areaK*((rho_p1[i]-rho_0[i])/ht_0 - (rho_0[i]-rho_m1[i])/ht_m1)**2 # # L2 norm to get the second time term + + time0.append(integral) # first time term + time1.append(val) # second time term + + return [time0,time1] + + +def get_maximum_morley(rhoFV,betaKE): +# need this for function get_theta_omega(...) + +# input : FV sol rhoFV and moerley coefficients betaKE +# output: approximated Linf(Omega)-norm of morley interpoaltion + + [nodal_max,beta_max] = get_max_q0_beta(rhoFV,betaKE) + + return nodal_max + beta_max # sum both maximal values + + +def get_max_q0_beta(rhoFV,betaKE) : +# need this for function get_theta_omega(...) 
+# idea: get maximal possible values for q0 and for beta-terms individually and then add them and use this as an upper bound + +# input : FV solution rhoFV and morley coefficients betaKE +# output: maximal possible values for q0 and for beta-terms individually + + nodal_max = np.max(rhoFV) # > arithmetic means to calculate nodal values for linear interpolation + beta_max = np.max(np.max(np.abs(betaKE),axis=0)) # > max bubbleE*bubbleK as max(bubble) = 1 + + return [nodal_max,beta_max] + + +def get_theta_omega(max1,max2,rho_0,rho_p1,betaKE_0,betaKE_p1,n) : +# get theta_omega term from masterthesis + +# input: max1 and max2 that were already calculated in the previous time step, morley^n, morley^n+1, FV sol rho^n, FV sol rho^n+1 +# output: theta_omega + + if n == 0 : # slightly different situation at early times + + max1 = get_maximum_morley(rho_p1,betaKE_p1) # no difference + max2 = get_maximum_morley(np.array(rho_0)-np.array(rho_p1),np.array(betaKE_0)-np.array(betaKE_p1)) # difference + + val = (max1+max2)*max2 # = theta_omega + + else : + max3 = max1 # update terms from previous time step + max4 = max2 + max1 = get_maximum_morley(rho_p1,betaKE_p1) # no difference + max2 = get_maximum_morley(np.array(rho_0)-np.array(rho_p1),np.array(betaKE_0)-np.array(betaKE_p1)) # difference + + val = (max1+max2)*max2+max3*max4 # = theta_omega + + return [val,max1,max2] # max1 and max2 will be used to calculate theta_omega for the next time step + + +def get_conv_terms(v_0,qq0_p1,betaKE_p1,rho_p1,K,tri) : +# get convection contributions for each triangle K + +# input: v^n=grad_c^n, morley^n+1, FV sol rho^n+1, primal triangulation +# output: convection contributions to the a posteriori error estimator + + conv_terms = [] + for i in range(len(K)) : + + morley_p1 = lambda x: morley_interpol(K,tri,qq0_p1,betaKE_p1,x,indexK=i) # get morley^n+1 + + indices = [[1,2],[0,2],[0,1]] + neighbors = tri.neighbors[i] # get nieghbors of triangle subject to index i + + hK = diam(K[i]) # get approximated diameter of the triangle + + val = 0 + for j in range(3) : # go through all edges + E = K[i][indices[j]] + + normalKE = unitouternormal(K[i],E) # get normal vector subject to edge E + + # COMPUTE convective term F_E/areaE + if np.abs(rho_p1[i]-rho_p1[neighbors[j]]) < 10**-6 or rho_p1[i] < 10**-6 or rho_p1[neighbors[j]] < 10**-6 : # in this case centered flux to avoid divide by 0 or log(0) + F_E_over_areaE = 1/2*(rho_p1[i]+rho_p1[neighbors[j]]) + else : # log-mean flux + F_E_over_areaE = (rho_p1[i]-rho_p1[neighbors[j]])/(np.log(rho_p1[i])-np.log(rho_p1[neighbors[j]])) + + aux = lambda x : (np.dot(v_0(x),normalKE)*(morley_p1(x)-F_E_over_areaE))**2 # edge L2 norm + [integral,avrg,areaE] =avrgE(E,aux) + + val += np.sqrt(integral)*hK/np.sqrt(areaE) # sum over edges + + conv_terms.append(val**2) + + return conv_terms + + +def get_morley0_coeff(rho_0,nppoints,numtri,tri,K) : +# get artificial morley interpolation at time zero +# need this for extra term at early times + +# input: rho_0 discrete initial datum, morley^1, FV sol rho^1, primal triangulation +# output: coefficients of artificial morley interpolation at time t=0 + + qq0 = getq0(K,nppoints,numtri,tri,rho_0,string = 'least-squares') # get linear interpolation q0 + + FKED = getinterpolationRHS(tri,K,rho_0) # get diffusive numerical fluxes DKE = FKED (nicaise-paper notation) from FV scheme + + betaKE = getbetaE(K,qq0,FKED) # get coefficients betaKE + + return [qq0,betaKE] + + +def get_early_time_error(K,tri,v_0,qq0_0,qq0_p1,betaKE_0,betaKE_p1) : +# get extra term at early 
times for each triangle K + +# input: primal triangulation, v^n=grad_c^n, morley^n, morley^n+1 +# output: extra term at early times for the a posteriori error estimator + + res_0 = [] + for i in range(len(K)) : + + morley_0 = lambda x: morley_interpol(K,tri,qq0_0,betaKE_0,x,indexK=i) # get morley^0 + morley_p1 = lambda x: morley_interpol(K,tri,qq0_p1,betaKE_p1,x,indexK=i) # get morley^1 + + grad_morley_0 = lambda x: grad_morley_interpol(K,tri,qq0_0,betaKE_0,x,indexK=i) # get grad_morley^0 + grad_morley_p1 = lambda x: grad_morley_interpol(K,tri,qq0_p1,betaKE_p1,x,indexK=i) # get grad_morley^1 + + aux_0 = lambda x : np.linalg.norm((morley_p1(x)-morley_0(x))*v_0(x) + (grad_morley_p1(x) - grad_morley_0(x)))**2 # L2 norm + [integral_0,avrg,areaK] = avrgK(K[i],aux_0) + + res_0.append(integral_0) + + + return res_0 \ No newline at end of file diff --git a/2d/plot_aposti_estimator.py b/2d/plot_aposti_estimator.py new file mode 100644 index 0000000..2be7a9a --- /dev/null +++ b/2d/plot_aposti_estimator.py @@ -0,0 +1,94 @@ +import myfun as my +import numpy as np +import time +import pickle + +# ------------------------------------------------------- PLOT A POSTERIORI ERROR ESTIMATOR --------------------------------------------------- +# This script plots the element-wise a posteriori error estimator \theta_K to see which elements would be refined when following an +# adaptive mesh refinement strategy. + +tic = time.time() + +# SETTINGS FOR SCHEME +nodal_avrg_choice = 'least-squares' +method = 'wasserstein' # using log-mean convective flux +boundary = 'Neumann' +interpol = 'morley' + +test = 'blow-up' +fineness = 3 # number of refinements of mesh before calculating numerical solution +Nt = 8 # number of time steps used + +n = 6 # time step snapshot + +if test == 'blow-up' : +# CONVECTION DEOMINATED REGIME i.e. 
BLOW-UP + + def rho0(x) : + val = 10**(3)*np.exp(-((x[0]-0.5)**2+(x[1]-0.5)**2)/10**(-2)) + return val + + rholim = [0,5000] + T = 0.0045 # time interval [0,T] + +else : + print('Warning: Wrong string input for test.') + +# SCHEME/PROBLEM PARAMETERS +hx = 1/2 +ht = (T)/(Nt) + +# GET PRIMAL MESH : +[tri,points,nppoints,numtri] = my.initialmesh() # initialize zerost mesh for FV approximations +K = nppoints[tri.simplices] + +# refine zerost mesh subject to fineness +for i in range(fineness) : + hx = 1/2*hx + [tri,points,nppoints,numtri] = my.refinemesh(points, K, numtri) + K = nppoints[tri.simplices] + +# print some plot/solution related quantities +print('hx',hx) +print('ht',ht) +pltx = 5000 +print('pltx',pltx) + +# initialite object +theta_K = [] + +if n == 0 : # slightly different situation at early times + + # load a posteriori error estimates + pickle_name = test+'_fineness'+str(fineness)+'_Nt'+str(Nt)+'_'+interpol+'_a posti error at time step'+str(n)+'.p' + [theta_early,theta_diff0,theta_time0,theta_Omega0] = pickle.load(open('pickle_files/'+pickle_name,'rb')) + pickle_name = test+'_fineness'+str(fineness)+'_Nt'+str(Nt)+'_'+interpol+'_a posti error at time step'+str(n+1)+'.p' + [theta_res1,theta_diff1,theta_time1,theta_conv1,theta_conv1,theta_Omega1] = pickle.load(open('pickle_files/'+pickle_name,'rb')) + + # compute \theta_K for each element K + for i in range(len(K)) : + theta_K.append(theta_early[i]+theta_res1[i]+theta_diff0[i]+theta_diff1[i]+theta_time0[i]+theta_conv1[i]) + + + +else : # n=/=0 + + # load a posteriori error estimates + pickle_name = test+'_fineness'+str(fineness)+'_Nt'+str(Nt)+'_'+interpol+'_a posti error at time step'+str(n)+'.p' + [theta_res0,theta_diff0,theta_time0,theta_conv0,theta_conv0,theta_Omega0] = pickle.load(open('pickle_files/'+pickle_name,'rb')) + pickle_name = test+'_fineness'+str(fineness)+'_Nt'+str(Nt)+'_'+interpol+'_a posti error at time step'+str(n+1)+'.p' + [theta_res1,theta_diff1,theta_time1,theta_conv1,theta_conv1,theta_Omega] = pickle.load(open('pickle_files/'+pickle_name,'rb')) + + # compute \theta_K for each element K + for i in range(len(theta_res1)) : + theta_K.append(theta_res0[i]+theta_res1[i]+theta_diff0[i]+theta_diff1[i]+theta_time0[i]+theta_time1[i]+theta_conv0[i]+theta_conv1[i]) + + +theta_K = np.array(theta_K) + +# get plot limits for error estimator +thetalim = [0,np.max(np.max(theta_K))] + +# plot theta_K as piecewise constant function +my.get_rho_plot(pltx,tri,n,ht,theta_K,thetalim,fineness) + \ No newline at end of file diff --git a/2d/plot_bubble.py b/2d/plot_bubble.py new file mode 100644 index 0000000..309a4e2 --- /dev/null +++ b/2d/plot_bubble.py @@ -0,0 +1,56 @@ +import myfun as my +import numpy as np +from matplotlib import pyplot as plt + + +#PLOT BUBBLE FUNCTIONS + +hplt = 200 + +K = [[0, 0],[0, 1],[1 ,0]] +L = [[1, 1],[0, 1],[1 ,0]] +x = np.linspace(0, 1, hplt) +y = np.linspace(0, 1, hplt) +X, Y = np.meshgrid(x, y) + + +Z = np.zeros([len(y),len(x)]) +for i in range(len(x)) : + for j in range(len(y)) : + + pt = [x[i],y[j]] + l = 0 + if pt[0]+pt[1]<1 : + # Z[j][i] = my.bubbleE(K,l,pt) + Z[j][i] = my.bubbleK(K,pt) + # else: + # Z[j][i] = my.bubbleE(L,l,pt) + + + +# #colormap plot +fig = plt.figure() +plt.pcolormesh(Z,cmap = 'jet') # limits of plot +plt.xlabel('x1') +plt.ylabel('x2') +plt.colorbar() + +# 3D-PLOT: +# X, Y = np.meshgrid(x, y) +# fig = plt.figure() +# ax = plt.axes(projection='3d') +# plt.xlabel('x1') +# plt.ylabel('x2') +# # ax.axes.set_xlim3d(left=0, right=1) +# # ax.axes.set_ylim3d(bottom=0, top=1) +# # 
ax.axes.set_zlim3d(bottom=rholim[0], top=rholim[1]) +# p = ax.plot_surface(X, Y, Z, rstride=1, cstride=1, cmap='jet', edgecolor='none') +# fig.colorbar(p, ax=ax) +# title = 'bubbleK' +# plt.title(title) +# image_title = title+'.png' +#plt.savefig(image_title, bbox_inches='tight') +# #plt.close() +plt.show() + + \ No newline at end of file diff --git a/2d/plot_meshes.py b/2d/plot_meshes.py new file mode 100644 index 0000000..dcceb29 --- /dev/null +++ b/2d/plot_meshes.py @@ -0,0 +1,47 @@ +import myfun as my +import numpy as np +from scipy.spatial import Voronoi, voronoi_plot_2d +from matplotlib import pyplot as plt + + +# ------------------------------------------------------- PLOT A POSTERIORI ERROR ESTIMATOR --------------------------------------------------- +# This script plots the primal mesh, the voronoi diagram of the primal mesh and the dual mesh in order to better understand the construction +# of the dual mesh. + + +fineness = 1 # number of refinements of mesh before calculating numerical solution + +[tri,points,nppoints,numtri] = my.initialmesh() # initialize zerost mesh for FV approximations +K = nppoints[tri.simplices] + +hx = 1/2 # mesh size of the zerost mesh + +# refine zerost mesh subject to fineness +for i in range(fineness) : + hx = 1/2*hx + [tri,points,nppoints,numtri] = my.refinemesh(points, K, numtri) + K = nppoints[tri.simplices] +print('numtri',numtri) + +vor = Voronoi(nppoints) + +# get dual mesh +[K_dual, tri_dual, points_dual, nppoints_dual, numtri_dual] = my.getdualmesh(points, tri, fineness, K) +K_dual = np.array(K_dual) +[tri_dual,K_dual] = my.orientation_dualmesh(tri_dual,K_dual) # enforce positive orientation of all triangles in dual mesh + + +# PLOT THE PRIMAL MESH +plt.triplot(nppoints[:,0], nppoints[:,1], tri.simplices,color='steelblue') +plt.show() + +# PLOT THE VORONOI DIAGRAM, I.E. THE DIRICHLET TESSELATION, OF THE PRIMAL MESH +fig = voronoi_plot_2d(vor) +plt.show() + +# PLOT THE DUAL MESH +plt.triplot(nppoints_dual[:,0], nppoints_dual[:,1], tri_dual.simplices,color='green') + + +# plt.savefig('test.png', bbox_inches='tight') +plt.show() diff --git a/2d/plot_num-sol.py b/2d/plot_num-sol.py new file mode 100644 index 0000000..42c3181 --- /dev/null +++ b/2d/plot_num-sol.py @@ -0,0 +1,146 @@ +import myfun as my +import numpy as np +import time +import pickle + + +# ----------------------------------------------------------- PLOT NUMERICAL SOLUTIONS --------------------------------------------------- +# This script plots the numerical solutions and Morley interpolations obtained via the FV-FE algorithm. + +tic = time.time() + +# SETTINGS FOR SCHEME +nodal_avrg_choice = 'arithmetic-mean' # 'arithmetic-mean' or 'least-squares' +method = 'wasserstein' # using log-mean convective flux +boundary = 'Neumann' +interpol = 'morley' + +test = 'manufactured1' #'diff', 'blow-up', 'manufactured' or 'manufactured1' +fineness = 3 # number of refinements of mesh before calculating numerical solution +Nt = 8 # number of time steps used + +# initialize different test cases +if test == 'blow-up' : +# CONVECTION DEOMINATED REGIME i.e. 
BLOW-UP + + def rho0(x) : + val = 10**(3)*np.exp(-((x[0]-0.5)**2+(x[1]-0.5)**2)/10**(-2)) + return val + + rholim = [0,5000] + + T = 0.0045 # time interval [0,T] + +elif test == 'diff' : +# DIFFUSION DOMINATED REGIME + def rho0(x) : + val = 1.3*np.exp(-25*(x[0]-1/2)**2-25*(x[1]-1/2)**2) + return val + + rholim = [0,1.3] + + T = 0.05 # time interval [0,T]# + +elif test == 'manufactured' : +# MANUFACTURED SOLUTION WITH T = 0.5 + def rho0(x) : + val = 1.3*np.exp(-60*(x[0]-1/2)**2-60*(x[1]-1/2)**2) + return val + + def exact_rho(x,t) : + val = 1.3/(1+t)*np.exp(-60*(x[0]-1/2)**2-60*(x[1]-1/2)**2) + return val + + def exact_c(x) : + val = 1.3*np.exp(-60*(x[0]-1/2)**2-60*(x[1]-1/2)**2) + return val + + rholim = [0,1.3] + + T = 0.5 # time interval [0,T]# + +elif test == 'manufactured1' : +# MANUFACTURED SOLUTION WITH T = 3 + + def rho0(x) : + val = 1.3*np.exp(-60*(x[0]-1/2)**2-60*(x[1]-1/2)**2) + return val + + def exact_rho(x,t) : + val = 1.3/(1+t)*np.exp(-60*(x[0]-1/2)**2-60*(x[1]-1/2)**2) + return val + + def exact_c(x) : + val = 1.3*np.exp(-60*(x[0]-1/2)**2-60*(x[1]-1/2)**2) + return val + + rholim = [0,1.3] + + T = 3 # time interval [0,T]# + +else : + print('Warning: Wrong string input for test.') + +# SCHEME/PROBLEM PARAMETERS +hx = 1/2 +ht = (T)/(Nt) + +# GET PRIMAL MESH : +[tri,points,nppoints,numtri] = my.initialmesh() # initialize zerost mesh for FV approximations +K = nppoints[tri.simplices] + +# refine zerost mesh subject to fineness +for i in range(fineness) : + hx = 1/2*hx + [tri,points,nppoints,numtri] = my.refinemesh(points, K, numtri) + K = nppoints[tri.simplices] + +# # NEED THIS ONLY TO PLOT CHEMICAL CONCENTRATION c +# #get dual mesh for FE method to approximate chemical density +# [K_dual, tri_dual, points_dual, nppoints_dual, numtri_dual] = my.getdualmesh(points, tri, fineness, K) +# K_dual = np.array(K_dual) +# [tri_dual,K_dual] = my.orientation_dualmesh(tri_dual,K_dual) # enforce positive orientation of all triangles in dual mesh + +# # compute mesh topology for faster find_simplex routine for dual mesh +# [oriented_edges,structure,edge_to_index] = my.get_edges_structure(K_dual) +# my.init_indexE(numtri_dual) + +# print some plot/solution related quantities +print('hx',hx) +print('ht',ht) +pltx = 200 +print('pltx',pltx) + +for n in [2,4,6,8] :# time snapshots that we want to plot # [0, 5, 10, 15] in kwon paper + + progress = n/Nt*100 + print("%.2f" % progress, 'procent of progress made.') + + # # PLOT CHEMICAL CONCENTRATION c + # # load data related to c + # pickle_name = test+'_fineness'+str(fineness)+'_Nt'+str(Nt)+'_'+interpol+'_c at time step'+str(n)+'.p' + # cc = pickle.load(open('pickle_files/'+pickle_name,'rb')) + # clim = [0,1.3] # set plot limits + # # plot piecewise linear FE solution c + # my.get_c_plot(pltx,tri_dual,K_dual,points_dual,n,ht,cc,clim,fineness,interpol,oriented_edges,structure,edge_to_index) + + # PLOT BACTERIAL DENSITY rho + # load data related to rho + pickle_name = test+'_fineness'+str(fineness)+'_Nt'+str(Nt)+'_'+interpol+'_rho at time step'+str(n)+'.p' + [ht,rho,qq0,betaKE] = pickle.load(open('pickle_files/'+pickle_name,'rb')) + if n == 0 : + [rhoFV,rho0] = pickle.load(open('pickle_files/'+pickle_name,'rb')) + # plot FV solution rho0 aka. 
projected initial datum for n=0 + my.getmorleyplot(n,ht,K,tri,qq0,betaKE,1/pltx,rholim,fineness) + #my.get_rho_plot(pltx,tri,n,ht,rhoFV,rholim,fineness) + else : + [ht,rho,qq0,betaKE] = pickle.load(open('pickle_files/'+pickle_name,'rb')) + # plot interpolation of Morley-type + my.getmorleyplot(n,ht,K,tri,qq0,betaKE,1/pltx,rholim,fineness) + # plot FV solution rho0 + # my.get_rho_plot(pltx,tri,n,ht,rho,rholim,fineness) + +progress = 100 +print("%.2f" % progress, 'procent of progress made.') +elapsed = time.time() - tic +print('This took',"%.2f" % round(elapsed/60, 2), 'minutes.') \ No newline at end of file diff --git a/2d/scaling_space_errorestimates.py b/2d/scaling_space_errorestimates.py new file mode 100644 index 0000000..25b6a43 --- /dev/null +++ b/2d/scaling_space_errorestimates.py @@ -0,0 +1,301 @@ +# import myfun as my +import numpy as np +from matplotlib import pyplot as plt +import time +import pickle + +# ----------------------------------------------------------- SCALING BEHAVIOR IN SPACE --------------------------------------------------- +# This script takes the previously computed a posteriori error estimates on each element and adds them in the way in which they constitute +# an upper bound for the actual error in LinfL2-L2H1 norm. Here we fix a number of time steps and investigate the scaling behavior in space, +# i.e. making the maximal mesh size small + +tic = time.time() + +# SETTINGS FOR SCHEME +nodal_avrg_choice = 'least-squares' +method = 'wasserstein' # using log-mean convective flux +boundary = 'Neumann' +interpol = 'morley' + +test = 'manufactured' #'diff', 'blow-up', 'manufactured' or 'manufactured1' +Nt = 8 # fixed number of time steps + +steps = 5 # number of space refinements used + +# initialize different test cases +if test == 'blow-up' : +# CONVECTION DEOMINATED REGIME i.e. 
BLOW-UP + + def rho0(x) : + val = 10**(3)*np.exp(-((x[0]-0.5)**2+(x[1]-0.5)**2)/10**(-2)) + return val + + T = 0.0045 # time interval [0,T] + +elif test == 'diff' : +# DIFFUSION DOMINATED REGIME + def rho0(x) : + val = 1.3*np.exp(-25*(x[0]-1/2)**2-25*(x[1]-1/2)**2) + return val + + T = 0.05 # time interval [0,T]# + +elif test == 'manufactured' : +# MANUFACTURED SOLUTION WITH T = 0.5 + def rho0(x) : + val = 1.3*np.exp(-60*(x[0]-1/2)**2-60*(x[1]-1/2)**2) + return val + + def exact_rho(x,t) : + val = 1.3/(1+t)*np.exp(-60*(x[0]-1/2)**2-60*(x[1]-1/2)**2) + return val + + def exact_c(x) : + val = 1.3*np.exp(-60*(x[0]-1/2)**2-60*(x[1]-1/2)**2) + return val + + T = 0.5 # time interval [0,T]# + +elif test == 'manufactured1' : +# MANUFACTURED SOLUTION WITH T = 3 + + def rho0(x) : + val = 1.3*np.exp(-60*(x[0]-1/2)**2-60*(x[1]-1/2)**2) + return val + + def exact_rho(x,t) : + val = 1.3/(1+t)*np.exp(-60*(x[0]-1/2)**2-60*(x[1]-1/2)**2) + return val + + def exact_c(x) : + val = 1.3*np.exp(-60*(x[0]-1/2)**2-60*(x[1]-1/2)**2) + return val + + T = 3 # time interval [0,T]# + +else : + print('Warning: Wrong string input for test.') + + +# SCHEME/PROBLEM PARAMETERS +h = 1 +ht = (T)/(Nt) + +print('ht',ht) + +# initialize objects +hh = [] +LinfL2_max = [] +L2H1_max = [] +theta_early_max = [] +theta_res_max = [] +theta_diff_max = [] +theta_time_max = [] +theta_time_val_max = [] +theta_conv_max = [] +theta_Omega_max = [] + + +for fineness in range(steps) : # varibale in space + h = 1/2*h + hh.append(h) + + # load "exact" error of manufactured solution + pickle_name = test+'_fineness'+str(fineness)+'_Nt'+str(Nt)+'_'+interpol+'_exact error.p' + [L2_array,LinfL2,L2H1] = pickle.load(open('pickle_files/'+pickle_name,'rb')) # load data + LinfL2_max.append(LinfL2) + L2H1_max.append(L2H1) + + # initialize objects + theta_early_sum = 0 + theta_res_sum = 0 + theta_diff_sum = 0 + theta_time_sum = 0 + theta_time_val_sum = 0 + theta_conv_sum = 0 + theta_Omega_sum = 0 + + for n in range(Nt) : # go through intervals inbetween time steps + + if n == 0 : + + # load a posteriori error estimates + pickle_name = test+'_fineness'+str(fineness)+'_Nt'+str(Nt)+'_'+interpol+'_a posti error at time step'+str(n)+'.p' + [theta_early,theta_res0,theta_diff0,theta_time0,theta_Omega0] = pickle.load(open('pickle_files/'+pickle_name,'rb')) + pickle_name = test+'_fineness'+str(fineness)+'_Nt'+str(Nt)+'_'+interpol+'_a posti error at time step'+str(n+1)+'.p' + [theta_res1,theta_diff1,theta_time1,theta_time_val1,theta_conv1,theta_Omega1] = pickle.load(open('pickle_files/'+pickle_name,'rb')) + + # initialize objects + theta_early_aux = 0 + theta_res_aux = 0 + theta_diff_aux = 0 + theta_time_aux = 0 + theta_time_val_aux = 0 + theta_conv_aux = 0 + theta_conv_aux = 0 + + # sum over all elements + for i in range(len(theta_early)) : + theta_early_aux += theta_early[i] # this is already the squared term + theta_res_aux += theta_res0[i] + theta_res1[i] + theta_diff_aux += theta_diff0[i]+theta_diff1[i] + theta_time_aux += theta_time0[i] + theta_conv_aux += theta_conv1[i] + + # sum over time steps, corresponds to L^2 norm in time + theta_early_sum += ht*theta_early_aux + theta_res_sum += ht*theta_res_aux + theta_diff_sum += ht*theta_diff_aux + theta_time_sum += ht*theta_time_aux + theta_conv_sum += ht*theta_conv_aux + theta_Omega_sum += ht*theta_Omega0**2 + + elif n == Nt-1 : + + # load a posteriori error estimates + pickle_name = test+'_fineness'+str(fineness)+'_Nt'+str(Nt)+'_'+interpol+'_a posti error at time step'+str(n)+'.p' + 
[theta_res0,theta_diff0,theta_time0,theta_time_val0,theta_conv0,theta_Omega0] = pickle.load(open('pickle_files/'+pickle_name,'rb')) + + # initialize objects + theta_res_aux = 0 + theta_diff_aux = 0 + theta_time_aux = 0 + theta_time_val_aux = 0 + theta_conv_aux = 0 + theta_delta_aux = 0 + + # sum over all elements + for i in range(len(theta_res1)) : + theta_res_aux += theta_res0[i] # this is already the squared term + theta_diff_aux += theta_diff0[i] + theta_time_aux += theta_time0[i] + theta_time_val_aux += theta_time_val0[i] + theta_conv_aux += theta_conv0[i] + + # sum over time steps, corresponds to L^2 norm in time + theta_res_sum += ht*theta_res_aux + theta_diff_sum += ht*theta_diff_aux + theta_time_sum += ht*theta_time_aux + theta_time_val_sum += ht*theta_time_val_aux + theta_conv_sum += ht*theta_conv_aux + theta_Omega_sum += ht*theta_Omega0**2 + + else : # n=/=0 + + # load a posteriori error estimates + pickle_name = test+'_fineness'+str(fineness)+'_Nt'+str(Nt)+'_'+interpol+'_a posti error at time step'+str(n)+'.p' + [theta_res0,theta_diff0,theta_time0,theta_time_val0,theta_conv0,theta_Omega0] = pickle.load(open('pickle_files/'+pickle_name,'rb')) + pickle_name = test+'_fineness'+str(fineness)+'_Nt'+str(Nt)+'_'+interpol+'_a posti error at time step'+str(n+1)+'.p' + [theta_res1,theta_diff1,theta_time1,theta_time_val1,theta_conv1,theta_Omega] = pickle.load(open('pickle_files/'+pickle_name,'rb')) + + # initialize objects + theta_res_aux = 0 + theta_diff_aux = 0 + theta_time_aux = 0 + theta_time_val_aux = 0 + theta_conv_aux = 0 + theta_conv_aux = 0 + + # sum over all elements + for i in range(len(theta_res1)) : + theta_res_aux += theta_res0[i]+theta_res1[i] # this is already the squared term + theta_diff_aux += theta_diff0[i]+theta_diff1[i] + theta_time_aux += theta_time0[i]+theta_time1[i] + theta_time_val_aux += theta_time_val0[i]+theta_time_val1[i] + theta_conv_aux += theta_conv0[i] + theta_conv1[i] + + # sum over time steps, corresponds to L^2 norm in time + theta_res_sum += ht*theta_res_aux + theta_diff_sum += ht*theta_diff_aux + theta_time_sum += ht*theta_time_aux + theta_time_val_sum += ht*theta_time_val_aux + theta_conv_sum += ht*theta_conv_aux + theta_Omega_sum += ht*theta_Omega0**2 + + # save results in order to compute EOC later + theta_early_max.append(theta_early_sum) + theta_res_max.append(theta_res_sum) + theta_diff_max.append(theta_diff_sum) + theta_time_max.append(theta_time_sum) + theta_time_val_max.append(theta_time_val_sum) + theta_conv_max.append(theta_conv_sum) + theta_Omega_max.append(theta_Omega_sum) + + +# print results so far +print('LinfL2_max ',np.round(LinfL2_max,5)) +print('L2H1_max ',np.round(L2H1_max,5)) +print('theta_early_max ',np.round(theta_early_max,5)) +print('theta_res_max ',np.round(theta_res_max,5)) +print('theta_diff_max ',np.round(theta_diff_max,40)) +print('theta_time_max ',np.round(theta_time_max,7)) +print('theta_time_val_max ',np.round(theta_time_val_max,5)) +print('theta_conv_max ',np.round(theta_conv_max,5)) +print('theta_Omega_max ',np.round(theta_Omega_max,5)) +print('mesh sizes ',hh) + +# initialize objects +eoc_LinfL2 = [] +eoc_L2H1 = [] +eoc_early = [] +eoc_res = [] +eoc_diff = [] +eoc_time = [] +eoc_time_val = [] +eoc_conv = [] +eoc_Omega = [] + +# go through all mesh sizes we used +for i in range(steps-1) : + + # COMPUTE THE EOCs + aux_LinfL2 = (np.log((LinfL2_max[i])/(LinfL2_max[i+1])))/(np.log((hh[i])/(hh[i+1]))) + aux_L2H1 = (np.log((L2H1_max[i])/(L2H1_max[i+1])))/(np.log((hh[i])/(hh[i+1]))) + aux_early = 
(np.log((theta_early_max[i])/(theta_early_max[i+1])))/(np.log((hh[i])/(hh[i+1]))) + aux_res = (np.log((theta_res_max[i])/(theta_res_max[i+1])))/(np.log((hh[i])/(hh[i+1]))) + aux_diff = (np.log((theta_diff_max[i])/(theta_diff_max[i+1])))/(np.log((hh[i])/(hh[i+1]))) + aux_time = (np.log((theta_time_max[i])/(theta_time_max[i+1])))/(np.log((hh[i])/(hh[i+1]))) + aux_time_val = (np.log((theta_time_val_max[i])/(theta_time_val_max[i+1])))/(np.log((hh[i])/(hh[i+1]))) + aux_delta = (np.log((theta_conv_max[i])/(theta_conv_max[i+1])))/(np.log((hh[i])/(hh[i+1]))) + aux_Omega = (np.log((theta_Omega_max[i])/(theta_Omega_max[i+1])))/(np.log((hh[i])/(hh[i+1]))) + + # save the EOCs + eoc_LinfL2.append(aux_LinfL2) + eoc_L2H1.append(aux_L2H1) + eoc_early.append(aux_early) + eoc_res.append(aux_res) + eoc_diff.append(aux_diff) + eoc_time.append(aux_time) + eoc_time_val.append(aux_time_val) + eoc_conv.append(aux_delta) + eoc_Omega.append(aux_Omega) + + +elapsed = time.time() - tic +print('This took',"%.2f" % round(elapsed/60, 2), 'minutes.') + +# print the EOCs +print('eoc_LinfL2 ',np.round(eoc_LinfL2,5)) +print('eoc_L2H1 ',np.round(eoc_L2H1,5)) +print('eoc_early ',np.round(eoc_early,5)) +print('eoc_res ',np.round(eoc_res,6)) +print('eoc_diff ',np.round(eoc_diff,5)) +print('eoc_time ',np.round(eoc_time,5)) +print('eoc_time_val ',np.round(eoc_time_val,5)) +print('eoc_conv ',np.round(eoc_conv,5)) +print('eoc_Omega ',np.round(eoc_Omega,5)) + + +# plot the EOCs +hh = np.array(hh) +fig = plt.figure() +plt.loglog(hh,hh,'g--',hh,hh**2,'y--',hh,LinfL2_max,'b',hh,L2H1_max,'r',hh,theta_time_max,'m',hh,theta_Omega_max,'orange',hh,theta_conv_max,'brown',hh,theta_early_max,'darkgoldenrod',hh,theta_res_max) + +plt.xlabel('hh') +plt.ylabel('error estimates') +plot_title = test+'_fineness'+str(fineness)+'_Nt'+str(Nt)+'_'+interpol+'_a posti error estimates' +plt.title(plot_title) +plt.legend(['h¹','h²','LinfL2','L2H1','theta_time','theta_Omega','theta_conv','theta_early','theta_res']) +image_title = str(steps)+'_scaling_behavior_a-posti-error-estimates.png'#example+'_'+title+'_'+method+'_'+string+'.png' +#plt.savefig(image_title, bbox_inches='tight') +plt.show() \ No newline at end of file diff --git a/2d/scaling_time_errorestimates.py b/2d/scaling_time_errorestimates.py new file mode 100644 index 0000000..c5718be --- /dev/null +++ b/2d/scaling_time_errorestimates.py @@ -0,0 +1,301 @@ +import numpy as np +import myfun as my +#from scipy.sparse.linalg import spsolve +#from mpl_toolkits import mplot3d +from matplotlib import pyplot as plt +import time +import pickle + +# ----------------------------------------------------------- SCALING BEHAVIOR IN SPACE --------------------------------------------------- +# This script takes the previously computed a posteriori error estimates on each element and adds them in the way in which they constitute +# an upper bound for the actual error in LinfL2-L2H1 norm. Here we fix a spatial mesh size and investigate the scaling behavior in time, i.e. +# we make the temporal step size small. + +tic = time.time() + +# SETTINGS FOR SCHEME +nodal_avrg_choice = 'least-squares' +method = 'wasserstein' # using log-mean convective flux +boundary = 'Neumann' +interpol = 'morley' + +test = 'manufactured1' #'diff', 'blow-up', 'manufactured' or 'manufactured1' + + +# initialize different test cases +if test == 'blow-up' : +# CONVECTION DEOMINATED REGIME i.e. 
BLOW-UP + + def rho0(x) : + val = 10**(3)*np.exp(-((x[0]-0.5)**2+(x[1]-0.5)**2)/10**(-2)) + return val + + T = 0.0045 # time interval [0,T] + +elif test == 'diff' : +# DIFFUSION DOMINATED REGIME + def rho0(x) : + val = 1.3*np.exp(-25*(x[0]-1/2)**2-25*(x[1]-1/2)**2) + return val + + T = 0.05 # time interval [0,T]# + +elif test == 'manufactured' : +# MANUFACTURED SOLUTION WITH T = 0.5 + def rho0(x) : + val = 1.3*np.exp(-60*(x[0]-1/2)**2-60*(x[1]-1/2)**2) + return val + + def exact_rho(x,t) : + val = 1.3/(1+t)*np.exp(-60*(x[0]-1/2)**2-60*(x[1]-1/2)**2) + return val + + def exact_c(x) : + val = 1.3*np.exp(-60*(x[0]-1/2)**2-60*(x[1]-1/2)**2) + return val + + T = 0.5 # time interval [0,T]# + +elif test == 'manufactured1' : +# MANUFACTURED SOLUTION WITH T = 3 + + def rho0(x) : + val = 1.3*np.exp(-60*(x[0]-1/2)**2-60*(x[1]-1/2)**2) + return val + + def exact_rho(x,t) : + val = 1.3/(1+t)*np.exp(-60*(x[0]-1/2)**2-60*(x[1]-1/2)**2) + return val + + def exact_c(x) : + val = 1.3*np.exp(-60*(x[0]-1/2)**2-60*(x[1]-1/2)**2) + return val + + T = 3 # time interval [0,T]# + +else : + print('Warning: Wrong string input for test.') + + +# SCHEME/PROBLEM PARAMETERS + +# initialize objects +hh = [] +LinfL2_max = [] +L2H1_max = [] +theta_early_max = [] +theta_res_max = [] +theta_diff_max = [] +theta_time_max = [] +theta_time_val_max = [] +theta_conv_max = [] +theta_delta_max = [] +theta_Omega_max = [] + +fineness = 4 # fix a number of refinements to the zerost mesh, i.e. fix a spatial mesh size +steps = 8 # number of different time step sizes used + +for Nt in [2,3,4,6,8,16,32,64]: # go through numbers of time steps, [1, 2, 3, 4, 6 ,8 , 16, 32, 64] + + ht = T/Nt + hh.append(ht) + + # initialize objects + theta_early_sum = 0 + theta_res_sum = 0 + theta_diff_sum = 0 + theta_time_sum = 0 + + theta_time_val_sum = 0 + theta_conv_sum = 0 + theta_delta_sum = 0 + theta_Omega_sum = 0 + + # load 'exact' error + pickle_name = test+'_fineness'+str(fineness)+'_Nt'+str(Nt)+'_'+interpol+'_exact error.p' + [L2_array,LinfL2,L2H1] = pickle.load(open('pickle_files/'+pickle_name,'rb')) # load data + LinfL2_max.append(LinfL2) + L2H1_max.append(L2H1) + + for n in range(Nt) : # go through intervals inbetween time steps + + if n == 0 : + + # load a posteriori error estimates + pickle_name = test+'_fineness'+str(fineness)+'_Nt'+str(Nt)+'_'+interpol+'_a posti error at time step'+str(n)+'.p' + [theta_early,theta_res0,theta_diff0,theta_time0,theta_Omega0] = pickle.load(open('pickle_files/'+pickle_name,'rb')) + pickle_name = test+'_fineness'+str(fineness)+'_Nt'+str(Nt)+'_'+interpol+'_a posti error at time step'+str(n+1)+'.p' + [theta_res1,theta_diff1,theta_time1,theta_time_val1,theta_conv1,theta_Omega1] = pickle.load(open('pickle_files/'+pickle_name,'rb')) + + # initialize objects + theta_early_aux = 0 + theta_res_aux = 0 + theta_diff_aux = 0 + theta_time_aux = 0 + theta_conv_aux = 0 + theta_delta_aux = 0 + + # sum over all elements + for i in range(len(theta_early)) : + theta_early_aux += theta_early[i] # this is already the squared term + theta_res_aux += theta_res0[i] + theta_res1[i] + theta_diff_aux += theta_diff0[i]+theta_diff1[i] + theta_time_aux += theta_time0[i] + theta_conv_aux += theta_conv1[i] + + # sum over time steps, corresponds to L^2 norm in time + theta_early_sum += ht*theta_early_aux + theta_res_sum += ht*theta_res_aux + theta_diff_sum += ht*theta_diff_aux + theta_time_sum += ht*theta_time_aux + theta_delta_sum += ht*theta_delta_aux + theta_Omega_sum += ht*theta_Omega0**2 + + elif n == Nt-1 : + + # load a posteriori error 
estimates (last interval: only the estimator at step n enters)
+            pickle_name = test+'_fineness'+str(fineness)+'_Nt'+str(Nt)+'_'+interpol+'_a posti error at time step'+str(n)+'.p'
+            [theta_res0,theta_diff0,theta_time0,theta_time_val0,theta_conv0,theta_Omega0] = pickle.load(open('pickle_files/'+pickle_name,'rb'))
+
+            # initialize objects
+            theta_res_aux = 0
+            theta_diff_aux = 0
+            theta_time_aux = 0
+            theta_time_val_aux = 0
+            theta_conv_aux = 0
+            theta_delta_aux = 0
+
+            # sum over all elements
+            for i in range(len(theta_res0)) :
+                theta_res_aux += theta_res0[i] # this is already the squared term
+                theta_diff_aux += theta_diff0[i]
+                theta_time_aux += theta_time0[i]
+                theta_time_val_aux += theta_time_val0[i]
+                theta_conv_aux += theta_conv0[i]
+
+            # sum over time steps, corresponds to L^2 norm in time
+            theta_res_sum += ht*theta_res_aux
+            theta_diff_sum += ht*theta_diff_aux
+            theta_time_sum += ht*theta_time_aux
+            theta_time_val_sum += ht*theta_time_val_aux
+            theta_conv_sum += ht*theta_conv_aux
+            theta_Omega_sum += ht*theta_Omega0**2
+
+        else : # interior time steps, 0 < n < Nt-1
+
+            # load a posteriori error estimates at steps n and n+1
+            pickle_name = test+'_fineness'+str(fineness)+'_Nt'+str(Nt)+'_'+interpol+'_a posti error at time step'+str(n)+'.p'
+            [theta_res0,theta_diff0,theta_time0,theta_time_val0,theta_conv0,theta_Omega0] = pickle.load(open('pickle_files/'+pickle_name,'rb'))
+            pickle_name = test+'_fineness'+str(fineness)+'_Nt'+str(Nt)+'_'+interpol+'_a posti error at time step'+str(n+1)+'.p'
+            [theta_res1,theta_diff1,theta_time1,theta_time_val1,theta_conv1,theta_Omega1] = pickle.load(open('pickle_files/'+pickle_name,'rb'))
+
+            # initialize objects
+            theta_res_aux = 0
+            theta_diff_aux = 0
+            theta_time_aux = 0
+            theta_time_val_aux = 0
+            theta_conv_aux = 0
+            theta_delta_aux = 0
+
+            # sum over all elements
+            for i in range(len(theta_res1)) :
+                theta_res_aux += theta_res0[i]+theta_res1[i] # this is already the squared term
+                theta_diff_aux += theta_diff0[i]+theta_diff1[i]
+                theta_time_aux += theta_time0[i]+theta_time1[i]
+                theta_time_val_aux += theta_time_val0[i]+theta_time_val1[i]
+                theta_conv_aux += theta_conv0[i]+theta_conv1[i]
+
+            # sum over time steps, corresponds to L^2 norm in time
+            theta_res_sum += ht*theta_res_aux
+            theta_diff_sum += ht*theta_diff_aux
+            theta_time_sum += ht*theta_time_aux
+            theta_time_val_sum += ht*theta_time_val_aux
+            theta_conv_sum += ht*theta_conv_aux
+            theta_Omega_sum += ht*theta_Omega0**2
+
+    # save results in order to compute EOC later
+    theta_early_max.append(theta_early_sum)
+    theta_res_max.append(theta_res_sum)
+    theta_diff_max.append(theta_diff_sum)
+    theta_time_max.append(theta_time_sum)
+    theta_time_val_max.append(theta_time_val_sum)
+    theta_conv_max.append(theta_conv_sum)
+    theta_Omega_max.append(theta_Omega_sum)
+
+# print results so far
+print('LinfL2_max ',np.round(LinfL2_max,5))
+print('L2H1_max ',np.round(L2H1_max,5))
+print('theta_early_max ',np.round(theta_early_max,5))
+print('theta_res_max ',np.round(theta_res_max,5))
+print('theta_diff_max ',np.round(theta_diff_max,40))
+print('theta_time_max ',np.round(theta_time_max,6))
+print('theta_time_val_max ',np.round(theta_time_val_max,5))
+print('theta_conv_max ',np.round(theta_conv_max,5))
+print('theta_Omega_max ',np.round(theta_Omega_max,5))
+print('time step sizes ',hh)
+
+# initialize objects
+eoc_LinfL2 = []
+eoc_L2H1 = []
+eoc_early = []
+eoc_res = []
+eoc_diff = []
+eoc_time = []
+eoc_time_val = []
+eoc_conv = []
+eoc_Omega = []
+
+# go through all time step sizes we used
+for i in range(steps-1) :
+
+    # COMPUTE EOCs (see the compute_eoc sketch appended after the patch)
+    aux_LinfL2 = 
(np.log((LinfL2_max[i])/(LinfL2_max[i+1])))/(np.log((hh[i])/(hh[i+1]))) + aux_L2H1 = (np.log((L2H1_max[i])/(L2H1_max[i+1])))/(np.log((hh[i])/(hh[i+1]))) + aux_early = (np.log((theta_early_max[i])/(theta_early_max[i+1])))/(np.log((hh[i])/(hh[i+1]))) + aux_res = (np.log((theta_res_max[i])/(theta_res_max[i+1])))/(np.log((hh[i])/(hh[i+1]))) + aux_diff = (np.log((theta_diff_max[i])/(theta_diff_max[i+1])))/(np.log((hh[i])/(hh[i+1]))) + aux_time = (np.log((theta_time_max[i])/(theta_time_max[i+1])))/(np.log((hh[i])/(hh[i+1]))) + aux_time_val = (np.log((theta_time_val_max[i])/(theta_time_val_max[i+1])))/(np.log((hh[i])/(hh[i+1]))) + aux_conv = (np.log((theta_conv_max[i])/(theta_conv_max[i+1])))/(np.log((hh[i])/(hh[i+1]))) + aux_Omega = (np.log((theta_Omega_max[i])/(theta_Omega_max[i+1])))/(np.log((hh[i])/(hh[i+1]))) + + # save the EOCs + eoc_LinfL2.append(aux_LinfL2) + eoc_L2H1.append(aux_L2H1) + eoc_early.append(aux_early) + eoc_res.append(aux_res) + eoc_diff.append(aux_diff) + eoc_time.append(aux_time) + eoc_time_val.append(aux_time_val) + eoc_conv.append(aux_conv) + eoc_Omega.append(aux_Omega) + + +elapsed = time.time() - tic +print('This took',"%.2f" % round(elapsed/60, 2), 'minutes.') + +# print the EOCs +print('eoc_LinfL2 ',np.round(eoc_LinfL2,5)) +print('eoc_L2H1 ',np.round(eoc_L2H1,5)) +print('eoc_early ',np.round(eoc_early,5)) +print('eoc_res ',np.round(eoc_res,5)) +print('eoc_diff ',np.round(eoc_diff,5)) +print('eoc_time ',np.round(eoc_time,5)) +print('eoc_time_val ',np.round(eoc_time_val,5)) +print('eoc_conv ',np.round(eoc_conv,5)) +print('eoc_Omega ',np.round(eoc_Omega,5)) + + +# plot the EOCs +hh = np.array(hh) +fig = plt.figure() +plt.loglog(hh,hh,'g--',hh,hh**2,'y--',hh,LinfL2_max,'b',hh,L2H1_max,'r',hh,theta_time_max,'m',hh,theta_Omega_max,'orange',hh,theta_conv_max,'brown',hh,theta_early_max,'darkgoldenrod',hh,theta_res_max) + +plt.xlabel('hh') +plt.ylabel('error estimates') +plot_title = test+'_fineness'+str(fineness)+'_Nt'+str(Nt)+'_'+interpol+'_a posti error estimates' +plt.title(plot_title) +plt.legend(['h¹','h²','LinfL2','L2H1','theta_time','theta_Omega','theta_conv','theta_early','theta_res']) +image_title = str(steps)+'_scaling_behavior_a-posti-error-estimates.png'#example+'_'+title+'_'+method+'_'+string+'.png' +#plt.savefig(image_title, bbox_inches='tight') +# plt.show() -- GitLab
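
For reference, the per-time-step accumulation in both scaling scripts sums the already-squared elementwise indicators over all elements and weights each interval by ht, which corresponds to the squared L^2-norm in time. A minimal sketch of that pattern, assuming a uniform time step and ignoring the special handling of the first and last interval; the helper name accumulate_estimator and the toy numbers are purely illustrative and not part of the committed scripts:

import numpy as np

def accumulate_estimator(element_terms_per_step, ht):
    # element_terms_per_step: one array of (already squared) elementwise
    # contributions per time interval; ht: uniform time step size.
    # Returns sum_n ht * sum_K theta_K^2, i.e. the squared L^2-in-time estimator.
    total = 0.0
    for terms in element_terms_per_step:
        total += ht * np.sum(terms)   # spatial sum on this interval, weighted by ht
    return total

# illustrative toy data: two intervals, three elements each
toy = [np.array([1e-4, 2e-4, 5e-5]), np.array([8e-5, 1e-4, 9e-5])]
print(accumulate_estimator(toy, ht=0.01))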
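The experimental orders of convergence printed by both scripts are computed as eoc_i = log(e_i/e_{i+1}) / log(h_i/h_{i+1}) for consecutive runs, with h either the spatial mesh size or the time step size. A self-contained sketch of that formula; the name compute_eoc and the sample values are hypothetical, not names used in the scripts:

import numpy as np

def compute_eoc(errors, sizes):
    # errors[i], sizes[i] belong to the i-th run (mesh size or time step size);
    # returns eoc[i] = log(e_i / e_{i+1}) / log(h_i / h_{i+1}).
    errors = np.asarray(errors, dtype=float)
    sizes = np.asarray(sizes, dtype=float)
    return np.log(errors[:-1] / errors[1:]) / np.log(sizes[:-1] / sizes[1:])

# illustrative check: an error decaying like h^2 gives EOCs close to 2
h = np.array([0.1, 0.05, 0.025])
print(np.round(compute_eoc(3.0 * h**2, h), 5))   # -> [2. 2.]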
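The settings above select method = 'wasserstein' with a log-mean convective flux. For orientation only, the logarithmic mean of two positive values is (a - b) / (log a - log b); a hedged sketch with a fallback to the arithmetic mean for nearly equal or tiny arguments — the function name log_mean and the tolerance 1e-6 are illustrative assumptions, not taken from myfun.py:

import numpy as np

def log_mean(a, b, tol=1e-6):
    # logarithmic mean (a - b) / (log a - log b); it degenerates when a and b
    # (almost) coincide or vanish, so fall back to the arithmetic mean there
    if abs(a - b) < tol or a < tol or b < tol:
        return 0.5 * (a + b)
    return (a - b) / (np.log(a) - np.log(b))

print(log_mean(1.0, 2.0))   # ~1.4427, between the geometric and arithmetic means
print(log_mean(1.0, 1.0))   # fallback branch: 1.0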